code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) lowercase__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowercase__ = 1 if upper_limit > 0: lowercase__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowercase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
703
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") lowercase__ = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): os.makedirs(SCREAMING_SNAKE_CASE_ ) lowercase__ = model.state_dict() def to_tf_var_name(SCREAMING_SNAKE_CASE_ ): for patt, repl in iter(SCREAMING_SNAKE_CASE_ ): lowercase__ = name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return f'''bert/{name}''' def create_tf_var(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tf.dtypes.as_dtype(tensor.dtype ) lowercase__ = tf.get_variable(dtype=SCREAMING_SNAKE_CASE_ , shape=tensor.shape , name=SCREAMING_SNAKE_CASE_ , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(SCREAMING_SNAKE_CASE_ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: lowercase__ = to_tf_var_name(SCREAMING_SNAKE_CASE_ ) lowercase__ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): lowercase__ = torch_tensor.T lowercase__ = create_tf_var(tensor=SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ , session=SCREAMING_SNAKE_CASE_ ) tf.keras.backend.set_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase__ = session.run(SCREAMING_SNAKE_CASE_ ) print(f'''Successfully created {tf_name}: {np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}''' ) lowercase__ = tf.train.Saver(tf.trainable_variables() ) saver.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , model_name.replace("-" , "_" ) + ".ckpt" ) ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_=None ): lowercase__ = argparse.ArgumentParser() parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Directory in which to save tensorflow model" ) lowercase__ = parser.parse_args(SCREAMING_SNAKE_CASE_ ) lowercase__ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=SCREAMING_SNAKE_CASE_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
37
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): for i in range(config.num_hidden_layers ): if base_model: lowercase__ = "" else: lowercase__ = "deit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowercase__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase__ = in_proj_weight[ : config.hidden_size, : ] lowercase__ = in_proj_bias[: config.hidden_size] lowercase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ = in_proj_weight[ -config.hidden_size :, : ] lowercase__ = in_proj_bias[-config.hidden_size :] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = dct.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = DeiTConfig() # all deit models have fine-tuned heads lowercase__ = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowercase__ = 1000 lowercase__ = "huggingface/label-files" lowercase__ = "imagenet-1k-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = int(deit_name[-6:-4] ) lowercase__ = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): lowercase__ = 192 lowercase__ = 768 lowercase__ = 12 lowercase__ = 3 elif deit_name[9:].startswith("small" ): lowercase__ = 384 lowercase__ = 1536 lowercase__ = 12 lowercase__ = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): lowercase__ = 1024 lowercase__ = 4096 lowercase__ = 24 lowercase__ = 16 # load original model from timm lowercase__ = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase__ = timm_model.state_dict() lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model lowercase__ = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by DeiTImageProcessor lowercase__ = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowercase__ = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE_ , crop_size=config.image_size ) lowercase__ = image_processor(images=prepare_img() , return_tensors="pt" ) lowercase__ = encoding["pixel_values"] lowercase__ = model(SCREAMING_SNAKE_CASE_ ) lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase_ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
704
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TimesformerModel""", """TimesformerForVideoClassification""", """TimesformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
0
import math from datetime import datetime, timedelta def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = year % 19 lowercase__ = year % 4 lowercase__ = year % 7 lowercase__ = math.floor(year / 100 ) lowercase__ = math.floor((13 + 8 * leap_day_inhibits) / 25 ) lowercase__ = leap_day_inhibits / 4 lowercase__ = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 lowercase__ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 lowercase__ = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon lowercase__ = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(SCREAMING_SNAKE_CASE_ , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(SCREAMING_SNAKE_CASE_ , 4 , 18 ) else: return datetime(SCREAMING_SNAKE_CASE_ , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1994, 2000, 2010, 2021, 2023): lowercase_ = """will be""" if year > datetime.now().year else """was""" print(F'Easter in {year} {tense} {gauss_easter(year)}')
705
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowercase_ = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ): if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE_ ) lowercase__ = True lowercase__ = True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ = model_class(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ = cached_file( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if compare_with_pt_model: lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" ) lowercase__ = pt_model_class.from_pretrained( pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): lowercase__ = pt_model(**pt_model.dummy_inputs ) lowercase__ = pto[0].numpy() lowercase__ = tfo[0].numpy() lowercase__ = np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format="h5" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ): if args_model_type is None: lowercase__ = list(MODEL_CLASSES.keys() ) else: lowercase__ = [args_model_type] for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ = model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: lowercase__ = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: lowercase__ = model_shortcut_name if os.path.isfile(SCREAMING_SNAKE_CASE_ ): lowercase__ = "converted_model" convert_pt_checkpoint_to_tf( model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , ) if remove_cached_files: os.remove(SCREAMING_SNAKE_CASE_ ) os.remove(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") lowercase_ = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
37
0
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _snake_case ( lowercase__ , unittest.TestCase): # TODO: is there an appropriate internal test set? UpperCamelCase__ : List[Any] ="""ssube/stable-diffusion-x4-upscaler-onnx""" def A__ ( self : List[str], __lowercase : Union[str, Any]=0 ): lowercase__ = floats_tensor((1, 3, 128, 128), rng=random.Random(__lowercase ) ) lowercase__ = torch.manual_seed(__lowercase ) lowercase__ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def A__ ( self : Any ): lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = self.get_dummy_inputs() lowercase__ = pipe(**__lowercase ).images lowercase__ = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowercase__ = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def A__ ( self : str ): lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) lowercase__ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = self.get_dummy_inputs() lowercase__ = pipe(**__lowercase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def A__ ( self : Tuple ): lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = self.get_dummy_inputs() lowercase__ = pipe(**__lowercase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def A__ ( self : Tuple ): lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) lowercase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = self.get_dummy_inputs() lowercase__ = pipe(**__lowercase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def A__ ( self : Any ): lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) lowercase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = self.get_dummy_inputs() lowercase__ = pipe(**__lowercase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _snake_case ( unittest.TestCase): @property def A__ ( self : List[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def A__ ( self : Dict ): lowercase__ = ort.SessionOptions() lowercase__ = False return options def A__ ( self : Any ): lowercase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowercase__ = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = "A fantasy landscape, trending on artstation" lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe( prompt=__lowercase, image=__lowercase, guidance_scale=7.5, num_inference_steps=10, generator=__lowercase, output_type="np", ) lowercase__ = output.images lowercase__ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowercase__ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def A__ ( self : Any ): lowercase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowercase__ = init_image.resize((128, 128) ) lowercase__ = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) lowercase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=__lowercase, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=__lowercase ) lowercase__ = "A fantasy landscape, trending on artstation" lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe( prompt=__lowercase, image=__lowercase, guidance_scale=7.5, num_inference_steps=20, generator=__lowercase, output_type="np", ) lowercase__ = output.images lowercase__ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowercase__ = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
706
import math def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(SCREAMING_SNAKE_CASE_ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("This should never happen" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowercase_ = """Enter the base and the power separated by a comma: """ lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. lowercase_ = res(xa, ya) lowercase_ = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
37
0
from __future__ import annotations class _snake_case : def __init__( self : Tuple, __lowercase : int ): lowercase__ = order # a_{0} ... a_{k} lowercase__ = [1.0] + [0.0] * order # b_{0} ... b_{k} lowercase__ = [1.0] + [0.0] * order # x[n-1] ... x[n-k] lowercase__ = [0.0] * self.order # y[n-1] ... y[n-k] lowercase__ = [0.0] * self.order def A__ ( self : int, __lowercase : list[float], __lowercase : list[float] ): if len(__lowercase ) < self.order: lowercase__ = [1.0, *a_coeffs] if len(__lowercase ) != self.order + 1: lowercase__ = ( F'''Expected a_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(__lowercase )}''' ) raise ValueError(__lowercase ) if len(__lowercase ) != self.order + 1: lowercase__ = ( F'''Expected b_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(__lowercase )}''' ) raise ValueError(__lowercase ) lowercase__ = a_coeffs lowercase__ = b_coeffs def A__ ( self : List[Any], __lowercase : float ): lowercase__ = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1, self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) lowercase__ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] lowercase__ = self.input_history[:-1] lowercase__ = self.output_history[:-1] lowercase__ = sample lowercase__ = result return result
707
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ): lowercase__ = bp_numa lowercase__ = bp_numa lowercase__ = bp_numa lowercase__ = conva_get[:2] lowercase__ = conva_get[2] lowercase__ = size_pa lowercase__ = rate_w lowercase__ = rate_t lowercase__ = [ np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 ) lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 ) lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self : Any, __lowercase : List[str] ): # save model dict with pickle lowercase__ = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowercase, "wb" ) as f: pickle.dump(__lowercase, __lowercase ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls : Dict, __lowercase : Union[str, Any] ): # read saved model with open(__lowercase, "rb" ) as f: lowercase__ = pickle.load(__lowercase ) # noqa: S301 lowercase__ = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) lowercase__ = model_dic.get("size_pooling1" ) lowercase__ = model_dic.get("num_bp1" ) lowercase__ = model_dic.get("num_bp2" ) lowercase__ = model_dic.get("num_bp3" ) lowercase__ = model_dic.get("rate_weight" ) lowercase__ = model_dic.get("rate_thre" ) # create model instance lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase ) # modify model parameter lowercase__ = model_dic.get("w_conv1" ) lowercase__ = model_dic.get("wkj" ) lowercase__ = model_dic.get("vji" ) lowercase__ = model_dic.get("thre_conv1" ) lowercase__ = model_dic.get("thre_bp2" ) lowercase__ = model_dic.get("thre_bp3" ) return conv_ins def A__ ( self : str, __lowercase : List[Any] ): return 1 / (1 + np.exp(-1 * x )) def A__ ( self : List[str], __lowercase : Optional[Any] ): return round(__lowercase, 3 ) def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ): # convolution process lowercase__ = convs[0] lowercase__ = convs[1] lowercase__ = np.shape(__lowercase )[0] # get the data slice of original image data, data_focus lowercase__ = [] for i_focus in range(0, size_data - size_conv + 1, __lowercase ): for j_focus in range(0, size_data - size_conv + 1, __lowercase ): lowercase__ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowercase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__ = [] lowercase__ = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__lowercase ): lowercase__ = [] for i_focus in range(len(__lowercase ) ): lowercase__ = ( np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__lowercase ) ) lowercase__ = np.asmatrix(__lowercase ).reshape( __lowercase, __lowercase ) data_featuremap.append(__lowercase ) # expanding the data slice to One dimenssion lowercase__ = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowercase ) ) lowercase__ = np.asarray(__lowercase ) return focus_list, data_featuremap def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ): # pooling process lowercase__ = len(featuremaps[0] ) lowercase__ = int(size_map / size_pooling ) lowercase__ = [] for i_map in range(len(__lowercase ) ): lowercase__ = featuremaps[i_map] lowercase__ = [] for i_focus in range(0, __lowercase, __lowercase ): for j_focus in range(0, __lowercase, __lowercase ): lowercase__ = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowercase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowercase ) ) lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase ) featuremap_pooled.append(__lowercase ) return featuremap_pooled def A__ ( self : str, __lowercase : Optional[Any] ): # expanding three dimension data to one dimension list lowercase__ = [] for i in range(len(__lowercase ) ): lowercase__ = np.shape(data[i] ) lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] ) lowercase__ = data_listed.getA().tolist()[0] data_expanded.extend(__lowercase ) lowercase__ = np.asarray(__lowercase ) return data_expanded def A__ ( self : Optional[int], __lowercase : Optional[int] ): # expanding matrix to one dimension list lowercase__ = np.asarray(__lowercase ) lowercase__ = np.shape(__lowercase ) lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] ) return data_expanded def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ): lowercase__ = [] lowercase__ = 0 for i_map in range(__lowercase ): lowercase__ = np.ones((size_map, size_map) ) for i in range(0, __lowercase, __lowercase ): for j in range(0, __lowercase, __lowercase ): lowercase__ = pd_pool[ i_pool ] lowercase__ = i_pool + 1 lowercase__ = np.multiply( __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) ) pd_all.append(__lowercase ) return pd_all def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ): # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(__lowercase )) ) print((" - - Shape: Teach_Data ", np.shape(__lowercase )) ) lowercase__ = 0 lowercase__ = [] lowercase__ = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase__ = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(__lowercase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__ = np.asmatrix(datas_train[p] ) lowercase__ = np.asarray(datas_teach[p] ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) lowercase__ = np.shape(__lowercase ) lowercase__ = self._expand(__lowercase ) lowercase__ = data_bp_input lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa lowercase__ = self.sig(__lowercase ) lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa lowercase__ = self.sig(__lowercase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__ = np.multiply( (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) ) lowercase__ = np.multiply( np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) ) lowercase__ = np.dot(__lowercase, self.vji ) lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__ = pd_conva_pooled.T.getA().tolist() lowercase__ = self._calculate_gradient_from_pool( __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__ = self._expand_mat(pd_conva_all[k_conv] ) lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase ) lowercase__ = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__ = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__ = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__ = rp + 1 lowercase__ = error_count / patterns all_mse.append(__lowercase ) def draw_error(): lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__lowercase, "+-" ) plt.plot(__lowercase, "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(__lowercase, alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A__ ( self : List[str], __lowercase : Optional[int] ): # model predict lowercase__ = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(__lowercase )) ) for p in range(len(__lowercase ) ): lowercase__ = np.asmatrix(datas_test[p] ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) lowercase__ = self._expand(__lowercase ) lowercase__ = data_bp_input lowercase__ = bp_outa * self.vji.T - self.thre_bpa lowercase__ = self.sig(__lowercase ) lowercase__ = bp_outa * self.wkj.T - self.thre_bpa lowercase__ = self.sig(__lowercase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out] return np.asarray(__lowercase ) def A__ ( self : int, __lowercase : Any ): # return the data of image after convoluting process so we can check it out lowercase__ = np.asmatrix(__lowercase ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
37
0
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def __lowerCAmelCase ( ): lowercase__ = 10 lowercase__ = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) lowercase__ = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(SCREAMING_SNAKE_CASE_ ) ), } , features=SCREAMING_SNAKE_CASE_ , ) return dataset @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=SCREAMING_SNAKE_CASE_ ) return filename # FILE_CONTENT + files lowercase_ = """\ Text data. Second line of data.""" @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt" lowercase__ = FILE_CONTENT with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return filename @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): import bza lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" lowercase__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with bza.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): import gzip lowercase__ = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) lowercase__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with gzip.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if datasets.config.LZ4_AVAILABLE: import lza.frame lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" lowercase__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with lza.frame.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if datasets.config.PY7ZR_AVAILABLE: import pyazr lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as archive: archive.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): import tarfile lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): import lzma lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.xz" lowercase__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with lzma.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): import zipfile lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowercase__ = tmp_path_factory.mktemp("data" ) / "file.txt.zst" lowercase__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "file.xml" lowercase__ = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return filename lowercase_ = [ {"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0}, {"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0}, {"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0}, {"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0}, ] lowercase_ = [ {"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0}, {"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0}, ] lowercase_ = { """col_1""": ["""0""", """1""", """2""", """3"""], """col_2""": [0, 1, 2, 3], """col_3""": [0.0, 1.0, 2.0, 3.0], } lowercase_ = [ {"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0}, {"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1}, ] lowercase_ = [ {"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0}, {"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0}, {"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0}, {"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0}, ] @pytest.fixture(scope="session" ) def __lowerCAmelCase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE_ ) ) as con: lowercase__ = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(SCREAMING_SNAKE_CASE_ , "w" , newline="" ) as f: lowercase__ = csv.DictWriter(SCREAMING_SNAKE_CASE_ , fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(SCREAMING_SNAKE_CASE_ , "w" , newline="" ) as f: lowercase__ = csv.DictWriter(SCREAMING_SNAKE_CASE_ , fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): import bza lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(SCREAMING_SNAKE_CASE_ , "rb" ) as f: lowercase__ = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) lowercase__ = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: lowercase__ = pq.ParquetWriter(SCREAMING_SNAKE_CASE_ , schema=SCREAMING_SNAKE_CASE_ ) lowercase__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE_ ) )] for k in DATA[0]} , schema=SCREAMING_SNAKE_CASE_ ) writer.write_table(SCREAMING_SNAKE_CASE_ ) writer.close() return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) lowercase__ = {"data": DATA} with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) lowercase__ = {"data": DATA_DICT_OF_LISTS} with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in DATA: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in DATA: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in DATA_312: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in DATA_STR: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): import gzip lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(SCREAMING_SNAKE_CASE_ , "rb" ) as orig_file: with gzip.open(SCREAMING_SNAKE_CASE_ , "wb" ) as zipped_file: zipped_file.writelines(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): import gzip lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(SCREAMING_SNAKE_CASE_ , "rb" ) as orig_file: with gzip.open(SCREAMING_SNAKE_CASE_ , "wb" ) as zipped_file: zipped_file.writelines(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("nested" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("nested" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = ["0", "1", "2", "3"] lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = ["0", "1", "2", "3"] lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = ["0", "1", "2", "3"] lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(SCREAMING_SNAKE_CASE_ , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join("main_dir" , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename("unsupported.ext" ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) lowercase__ = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( ): return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def __lowerCAmelCase ( ): return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) ) f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ).replace(".jpg" , "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt" , "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt" , "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt" , "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt" , "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt" , "w" ) as f: f.write("bar\n" * 10 ) return data_dir
708
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = "huggingface/label-files" lowercase__ = "imagenet-1k-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase__ = BitConfig( conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , ) return config def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if "stem.conv" in name: lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: lowercase__ = name.replace("blocks" , "layers" ) if "head.fc" in name: lowercase__ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): lowercase__ = "bit." + name if "bit" not in name and "classifier" not in name: lowercase__ = "bit.encoder." + name return name def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = get_config(SCREAMING_SNAKE_CASE_ ) # load original model from timm lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() # load state_dict of original model lowercase__ = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val.squeeze() if "head" in key else val # load HuggingFace model lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # create image processor lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) ) lowercase__ = transform.transforms lowercase__ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } lowercase__ = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase__ = prepare_img() lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # verify logits with torch.no_grad(): lowercase__ = model(SCREAMING_SNAKE_CASE_ ) lowercase__ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) lowercase_ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
0
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : @staticmethod def A__ ( *__lowercase : List[str], **__lowercase : int ): pass @is_pipeline_test @require_vision class _snake_case ( unittest.TestCase): @require_torch def A__ ( self : List[str] ): lowercase__ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], ], ) @require_tf def A__ ( self : Any ): lowercase__ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowercase ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], ], ) @slow @require_torch def A__ ( self : int ): lowercase__ = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowercase ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def A__ ( self : List[Any] ): lowercase__ = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowercase ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
709
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _snake_case ( lowercase__): def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ): lowercase__ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } lowercase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowercase__ = token_dict["token"] lowercase__ = Tokenizer(Unigram() ) lowercase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ), " " ), normalizers.Lowercase(), ] ) lowercase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ), pre_tokenizers.Digits(individual_digits=__lowercase ), pre_tokenizers.Punctuation(), ] ) lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ) lowercase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], ) lowercase__ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__lowercase, __lowercase ) def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) if isinstance(__lowercase, __lowercase ): lowercase__ = [files] self._tokenizer.train(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : str ): lowercase__ = json.loads(self._tokenizer.to_str() ) lowercase__ = self.special_tokens["unk"]["id"] lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
37
0
from __future__ import annotations def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(SCREAMING_SNAKE_CASE_ ) return n == n[::-1] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 100_0000 ): lowercase__ = 0 for i in range(1 , SCREAMING_SNAKE_CASE_ ): if is_palindrome(SCREAMING_SNAKE_CASE_ ) and is_palindrome(bin(SCREAMING_SNAKE_CASE_ ).split("b" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
710
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowercase__ = f'''{src_lang}-{tgt_lang}''' lowercase__ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). - re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(f'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowercase_ = Path(__file__).resolve().parent.parent.parent lowercase_ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""") lowercase_ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
0
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _snake_case ( unittest.TestCase): def A__ ( self : List[str] ): lowercase__ = inspect.getfile(accelerate.test_utils ) lowercase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) lowercase__ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) lowercase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A__ ( self : int ): print(F'''Found {torch.cuda.device_count()} devices.''' ) lowercase__ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowercase, env=os.environ.copy() ) @require_multi_gpu def A__ ( self : Any ): print(F'''Found {torch.cuda.device_count()} devices.''' ) lowercase__ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowercase, env=os.environ.copy() ) @require_multi_gpu def A__ ( self : int ): lowercase__ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowercase, env=os.environ.copy() ) @require_multi_gpu def A__ ( self : str ): print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) lowercase__ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1" ): execute_subprocess_async(__lowercase, env=os.environ.copy() ) if __name__ == "__main__": lowercase_ = Accelerator() lowercase_ = (accelerator.state.process_index + 2, 10) lowercase_ = torch.randint(0, 10, shape).to(accelerator.device) lowercase_ = """""" lowercase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowercase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowercase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
711
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Dict =TransfoXLTokenizer UpperCamelCase__ : List[Any] =False UpperCamelCase__ : List[Any] =False def A__ ( self : Union[str, Any] ): super().setUp() lowercase__ = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def A__ ( self : Union[str, Any], **__lowercase : Any ): lowercase__ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase ) def A__ ( self : Tuple, __lowercase : Optional[int] ): lowercase__ = "<unk> UNwanted , running" lowercase__ = "<unk> unwanted, running" return input_text, output_text def A__ ( self : str ): lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase ) lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" ) self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] ) def A__ ( self : Tuple ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["hello", "!", "how", "are", "you", "?"] ) def A__ ( self : Tuple ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def A__ ( self : str ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" lowercase__ = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase ) self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase ) def A__ ( self : List[str] ): lowercase__ = self.get_tokenizer() lowercase__ = len(__lowercase ) tokenizer.add_tokens(["new1", "new2"] ) tokenizer.move_added_token("new1", 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(__lowercase ), original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1" ), [1] ) self.assertEqual(tokenizer.decode([1] ), "new1" )
37
0
from math import pi def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
712
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowerCAmelCase ( ): lowercase__ = HfArgumentParser(SCREAMING_SNAKE_CASE_ ) lowercase__ = parser.parse_args_into_dataclasses()[0] lowercase__ = TensorFlowBenchmark(args=SCREAMING_SNAKE_CASE_ ) try: lowercase__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase__ = "Arg --no_{0} is no longer used, please use --no-{0} instead." lowercase__ = " ".join(str(SCREAMING_SNAKE_CASE_ ).split(" " )[:-1] ) lowercase__ = "" lowercase__ = eval(str(SCREAMING_SNAKE_CASE_ ).split(" " )[-1] ) lowercase__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: lowercase__ = full_error_msg + begin_error_msg + str(SCREAMING_SNAKE_CASE_ ) raise ValueError(SCREAMING_SNAKE_CASE_ ) benchmark.run() if __name__ == "__main__": main()
37
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowercase_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowercase__ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) lowercase__ = value else: lowercase__ = value return new_state_dict def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = "" if is_panoptic: lowercase__ = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase__ = in_proj_weight[:256, :] lowercase__ = in_proj_bias[:256] lowercase__ = in_proj_weight[256:512, :] lowercase__ = in_proj_bias[256:512] lowercase__ = in_proj_weight[-256:, :] lowercase__ = in_proj_bias[-256:] def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: lowercase__ = "resnet101" if "dc5" in model_name: lowercase__ = True lowercase__ = "panoptic" in model_name if is_panoptic: lowercase__ = 250 else: lowercase__ = 91 lowercase__ = "huggingface/label-files" lowercase__ = "coco-detection-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} # load image processor lowercase__ = "coco_panoptic" if is_panoptic else "coco_detection" lowercase__ = ConditionalDetrImageProcessor(format=SCREAMING_SNAKE_CASE_ ) # prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) lowercase__ = encoding["pixel_values"] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub lowercase__ = torch.hub.load("DeppMeng/ConditionalDETR" , SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ).eval() lowercase__ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: lowercase__ = "conditional_detr." + src rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase__ = rename_backbone_keys(SCREAMING_SNAKE_CASE_ ) # query, key and value matrices need special treatment read_in_q_k_v(SCREAMING_SNAKE_CASE_ , is_panoptic=SCREAMING_SNAKE_CASE_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase__ = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val # finally, create HuggingFace model and load state dict lowercase__ = ConditionalDetrForSegmentation(SCREAMING_SNAKE_CASE_ ) if is_panoptic else ConditionalDetrForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() model.push_to_hub(repo_id=SCREAMING_SNAKE_CASE_ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion lowercase__ = conditional_detr(SCREAMING_SNAKE_CASE_ ) lowercase__ = model(SCREAMING_SNAKE_CASE_ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowercase_ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
713
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowercase_ = """<<<<<<< This should probably be modified because it mentions: """ lowercase_ = """======= >>>>>>> """ lowercase_ = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] lowercase_ = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class _snake_case ( lowercase__): @staticmethod def A__ ( __lowercase : ArgumentParser ): lowercase__ = parser.add_parser( "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", ) train_parser.add_argument( "--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", ) train_parser.add_argument( "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ): lowercase__ = get_logger("datasets-cli/converting" ) lowercase__ = tfds_path lowercase__ = datasets_directory def A__ ( self : Any ): if os.path.isdir(self._tfds_path ): lowercase__ = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__ = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." ) lowercase__ = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) lowercase__ = [] lowercase__ = [] lowercase__ = {} if os.path.isdir(self._tfds_path ): lowercase__ = os.listdir(__lowercase ) else: lowercase__ = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(__lowercase, encoding="utf-8" ) as f: lowercase__ = f.readlines() lowercase__ = [] lowercase__ = False lowercase__ = False lowercase__ = [] for line in lines: lowercase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__ = "import datasets\n" elif "import tensorflow" in out_line: # order is important here lowercase__ = "" continue elif "from absl import logging" in out_line: lowercase__ = "from datasets import logging\n" elif "getLogger" in out_line: lowercase__ = out_line.replace("getLogger", "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__ = True lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" ) out_lines.append(__lowercase ) out_lines.append(__lowercase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__ = re.sub(__lowercase, __lowercase, __lowercase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) lowercase__ = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__ = True out_lines.append(__lowercase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__ = f_name.replace(".py", "" ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) os.makedirs(__lowercase, exist_ok=__lowercase ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__lowercase ) if needs_manual_update: with_manual_update.append(__lowercase ) with open(__lowercase, "w", encoding="utf-8" ) as f: f.writelines(__lowercase ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: lowercase__ = os.path.basename(__lowercase ) lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(__lowercase, __lowercase ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : int, __lowercase : List[Any], __lowercase : Union[str, Any]=7, __lowercase : str=3, __lowercase : Optional[Any]=18, __lowercase : Tuple=30, __lowercase : Any=400, __lowercase : Any=True, __lowercase : str=None, __lowercase : Optional[int]=True, ): lowercase__ = size if size is not None else {"height": 18, "width": 18} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size lowercase__ = apply_ocr def A__ ( self : Optional[int] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : List[str] =LayoutLMvaImageProcessor if is_pytesseract_available() else None def A__ ( self : Optional[int] ): lowercase__ = LayoutLMvaImageProcessingTester(self ) @property def A__ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : int ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_resize" ) ) self.assertTrue(hasattr(__lowercase, "size" ) ) self.assertTrue(hasattr(__lowercase, "apply_ocr" ) ) def A__ ( self : Optional[int] ): lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"height": 18, "width": 18} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42} ) def A__ ( self : Any ): pass def A__ ( self : List[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertIsInstance(encoding.words, __lowercase ) self.assertIsInstance(encoding.boxes, __lowercase ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def A__ ( self : Any ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def A__ ( self : List[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def A__ ( self : List[str] ): # with apply_OCR = True lowercase__ = LayoutLMvaImageProcessor() from datasets import load_dataset lowercase__ = load_dataset("hf-internal-testing/fixtures_docvqa", split="test" ) lowercase__ = Image.open(ds[0]["file"] ).convert("RGB" ) lowercase__ = image_processing(__lowercase, return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowercase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowercase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, __lowercase ) self.assertListEqual(encoding.boxes, __lowercase ) # with apply_OCR = False lowercase__ = LayoutLMvaImageProcessor(apply_ocr=__lowercase ) lowercase__ = image_processing(__lowercase, return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
714
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } lowercase_ = { """allenai/led-base-16384""": 1_6384, } class _snake_case ( lowercase__): UpperCamelCase__ : int =VOCAB_FILES_NAMES UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : List[Any] =LEDTokenizer UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ): super().__init__( __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) ) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**__lowercase ) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = "post_processor" lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase ) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state["sep"] ) if "cls" in state: lowercase__ = tuple(state["cls"] ) lowercase__ = False if state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get("trim_offsets", __lowercase ) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(__lowercase, state.pop("type" ) ) lowercase__ = component_class(**__lowercase ) setattr(self.backend_tokenizer, __lowercase, __lowercase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def A__ ( self : str ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def A__ ( self : Optional[int], __lowercase : Dict ): lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value lowercase__ = value def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowercase, **__lowercase ) def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowercase, **__lowercase ) def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ): lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase ) return tuple(__lowercase ) def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ): lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ): lowercase__ = super()._pad( encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, ) # Load from model defaults if return_attention_mask is None: lowercase__ = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase__ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase ) if needs_to_be_padded: lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase__ = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowercase__ = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
0
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = len(SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ): if numbers[j] < numbers[i]: lowercase__ , lowercase__ = numbers[j], numbers[i] return numbers if __name__ == "__main__": lowercase_ = input("""Enter numbers separated by a comma:\n""").strip() lowercase_ = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
715
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __lowerCAmelCase ( ): lowercase__ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE_ ) lowercase__ = parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=SCREAMING_SNAKE_CASE_ ) env_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) launch_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) test_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) # Let's go lowercase__ = parser.parse_args() if not hasattr(SCREAMING_SNAKE_CASE_ , "func" ): parser.print_help() exit(1 ) # Run args.func(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
37
0
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class _snake_case ( lowercase__): UpperCamelCase__ : Tuple ="""umt5""" UpperCamelCase__ : Optional[int] =["""past_key_values"""] def __init__( self : Dict, __lowercase : str=25_0112, __lowercase : Tuple=512, __lowercase : str=64, __lowercase : List[Any]=1024, __lowercase : Dict=8, __lowercase : List[str]=None, __lowercase : List[str]=6, __lowercase : Dict=32, __lowercase : Optional[int]=128, __lowercase : int=0.1, __lowercase : Dict=1e-6, __lowercase : List[str]=1.0, __lowercase : int="gated-gelu", __lowercase : List[str]=True, __lowercase : Optional[int]=True, __lowercase : Optional[int]="T5Tokenizer", __lowercase : List[str]=True, __lowercase : Any=0, __lowercase : int=1, __lowercase : List[Any]=0, **__lowercase : List[Any], ): super().__init__( is_encoder_decoder=__lowercase, tokenizer_class=__lowercase, tie_word_embeddings=__lowercase, pad_token_id=__lowercase, eos_token_id=__lowercase, decoder_start_token_id=__lowercase, **__lowercase, ) lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_kv lowercase__ = d_ff lowercase__ = num_layers lowercase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ = num_heads lowercase__ = relative_attention_num_buckets lowercase__ = relative_attention_max_distance lowercase__ = dropout_rate lowercase__ = layer_norm_epsilon lowercase__ = initializer_factor lowercase__ = feed_forward_proj lowercase__ = use_cache lowercase__ = self.feed_forward_proj.split("-" ) lowercase__ = act_info[-1] lowercase__ = act_info[0] == "gated" if len(__lowercase ) > 1 and act_info[0] != "gated" or len(__lowercase ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": lowercase__ = "gelu_new" @property def A__ ( self : int ): return self.d_model @property def A__ ( self : int ): return self.num_heads @property def A__ ( self : str ): return self.num_layers class _snake_case ( lowercase__): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def A__ ( self : Dict ): lowercase__ = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: lowercase__ = "past_encoder_sequence + sequence" lowercase__ = {0: "batch"} lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowercase__ = {0: "batch", 1: "decoder_sequence"} lowercase__ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__lowercase, direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def A__ ( self : Optional[Any] ): return 13 @property def A__ ( self : Optional[int] ): return 5e-4
716
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ): lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {"height": 18, "width": 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def A__ ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None def A__ ( self : str ): lowercase__ = DonutImageProcessingTester(self ) @property def A__ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_resize" ) ) self.assertTrue(hasattr(__lowercase, "size" ) ) self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) ) self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) ) self.assertTrue(hasattr(__lowercase, "do_pad" ) ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "image_mean" ) ) self.assertTrue(hasattr(__lowercase, "image_std" ) ) def A__ ( self : str ): lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"height": 18, "width": 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) ) self.assertEqual(image_processor.size, {"height": 84, "width": 42} ) def A__ ( self : List[str] ): pass @is_flaky() def A__ ( self : Dict ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Optional[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Tuple ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
37
0
from math import factorial, pi def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 30 ): if not isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ): raise ValueError("maclaurin_sin() requires either an int or float for theta" ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or accuracy <= 0: raise ValueError("maclaurin_sin() requires a positive int for accuracy" ) lowercase__ = float(SCREAMING_SNAKE_CASE_ ) lowercase__ = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(SCREAMING_SNAKE_CASE_ ) ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 30 ): if not isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ): raise ValueError("maclaurin_cos() requires either an int or float for theta" ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or accuracy <= 0: raise ValueError("maclaurin_cos() requires a positive int for accuracy" ) lowercase__ = float(SCREAMING_SNAKE_CASE_ ) lowercase__ = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
717
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _snake_case ( lowercase__): def A__ ( self : Optional[Any], __lowercase : str ): with open(__lowercase, encoding="utf-8" ) as input_file: lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) lowercase__ = input_file.read() lowercase__ = regexp.search(__lowercase ) return match def A__ ( self : str, __lowercase : str ): with open(__lowercase, encoding="utf-8" ) as input_file: lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL ) lowercase__ = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` lowercase__ = regexp.finditer(__lowercase ) lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A__ ( self : Union[str, Any] ): lowercase__ = Path("./datasets" ) lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowercase ) ): raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' ) def A__ ( self : Union[str, Any] ): lowercase__ = Path("./datasets" ) lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowercase ) ): raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
37
0
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _snake_case : UpperCamelCase__ : int UpperCamelCase__ : TreeNode | None =None UpperCamelCase__ : TreeNode | None =None lowercase_ = namedtuple("""CoinsDistribResult""", """moves excess""") def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if root is None: return 0 # Validation def count_nodes(SCREAMING_SNAKE_CASE_ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(SCREAMING_SNAKE_CASE_ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(SCREAMING_SNAKE_CASE_ ) != count_coins(SCREAMING_SNAKE_CASE_ ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(SCREAMING_SNAKE_CASE_ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) lowercase__ , lowercase__ = get_distrib(node.left ) lowercase__ , lowercase__ = get_distrib(node.right ) lowercase__ = 1 - left_distrib_excess lowercase__ = 1 - right_distrib_excess lowercase__ = ( left_distrib_moves + right_distrib_moves + abs(SCREAMING_SNAKE_CASE_ ) + abs(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = node.data - coins_to_left - coins_to_right return CoinsDistribResult(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return get_distrib(SCREAMING_SNAKE_CASE_ )[0] if __name__ == "__main__": import doctest doctest.testmod()
718
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
0
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Dict =XGLMTokenizer UpperCamelCase__ : Dict =XGLMTokenizerFast UpperCamelCase__ : Tuple =True UpperCamelCase__ : List[str] =True def A__ ( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XGLMTokenizer(__lowercase, keep_accents=__lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def A__ ( self : Union[str, Any] ): lowercase__ = "<pad>" lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ), __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ), __lowercase ) def A__ ( self : Tuple ): lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], "<s>" ) self.assertEqual(vocab_keys[1], "<pad>" ) self.assertEqual(len(__lowercase ), 1008 ) def A__ ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size, 1008 ) def A__ ( self : List[str] ): lowercase__ = XGLMTokenizer(__lowercase, keep_accents=__lowercase ) lowercase__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowercase, ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) lowercase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowercase, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) lowercase__ = tokenizer.convert_tokens_to_ids(__lowercase ) self.assertListEqual( __lowercase, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) lowercase__ = tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def A__ ( self : List[str] ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def A__ ( self : int ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__lowercase, f.name ) lowercase__ = XGLMTokenizer(f.name, keep_accents=__lowercase ) lowercase__ = pickle.dumps(__lowercase ) pickle.loads(__lowercase ) def A__ ( self : Dict ): if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = "I was born in 92000, and this is falsé." lowercase__ = tokenizer.tokenize(__lowercase ) lowercase__ = rust_tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase, __lowercase ) lowercase__ = tokenizer.encode(__lowercase, add_special_tokens=__lowercase ) lowercase__ = rust_tokenizer.encode(__lowercase, add_special_tokens=__lowercase ) self.assertListEqual(__lowercase, __lowercase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(__lowercase ) lowercase__ = rust_tokenizer.encode(__lowercase ) self.assertListEqual(__lowercase, __lowercase ) @slow def A__ ( self : List[Any] ): lowercase__ = "Hello World!" lowercase__ = [2, 3_1227, 4447, 35] self.assertListEqual(__lowercase, self.big_tokenizer.encode(__lowercase ) ) @slow def A__ ( self : Union[str, Any] ): lowercase__ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off lowercase__ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(__lowercase, self.big_tokenizer.encode(__lowercase ) ) @slow def A__ ( self : Tuple ): # fmt: off lowercase__ = { "input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase, model_name="facebook/xglm-564M", padding=__lowercase, )
719
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ): lowercase__ = size if size is not None else {"height": 20, "width": 20} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = size lowercase__ = do_normalize lowercase__ = do_convert_rgb lowercase__ = [512, 1024, 2048, 4096] lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16} def A__ ( self : List[str] ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def A__ ( self : Any ): lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Any ): lowercase__ = PixaStructImageProcessingTester(self ) @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Optional[int] ): lowercase__ = self.image_processor_tester.prepare_dummy_image() lowercase__ = self.image_processing_class(**self.image_processor_dict ) lowercase__ = 2048 lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) ) def A__ ( self : Union[str, Any] ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : int ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 lowercase__ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowercase ): lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches lowercase__ = "Hello" lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Tuple ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Any ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Optional[int] ): lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 ) lowercase__ = 3 @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Dict ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Union[str, Any] ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
37
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """facebook/nllb-large-en-ro""": 1024, """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off lowercase_ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class _snake_case ( lowercase__): UpperCamelCase__ : str =VOCAB_FILES_NAMES UpperCamelCase__ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : Tuple =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Optional[int] =["""input_ids""", """attention_mask"""] UpperCamelCase__ : Optional[int] =NllbTokenizer UpperCamelCase__ : List[int] =[] UpperCamelCase__ : List[int] =[] def __init__( self : Dict, __lowercase : Union[str, Any]=None, __lowercase : List[Any]=None, __lowercase : Dict="<s>", __lowercase : List[str]="</s>", __lowercase : Tuple="</s>", __lowercase : Dict="<s>", __lowercase : Tuple="<unk>", __lowercase : Union[str, Any]="<pad>", __lowercase : Any="<mask>", __lowercase : Any=None, __lowercase : List[Any]=None, __lowercase : Any=None, __lowercase : Optional[int]=False, **__lowercase : str, ): # Mask token behave like a normal word, i.e. include the space before it lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else mask_token lowercase__ = legacy_behaviour super().__init__( vocab_file=__lowercase, tokenizer_file=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, src_lang=__lowercase, tgt_lang=__lowercase, additional_special_tokens=__lowercase, legacy_behaviour=__lowercase, **__lowercase, ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) lowercase__ = { lang_code: self.convert_tokens_to_ids(__lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowercase__ = src_lang if src_lang is not None else "eng_Latn" lowercase__ = self.convert_tokens_to_ids(self._src_lang ) lowercase__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A__ ( self : Optional[Any] ): return self._src_lang @src_lang.setter def A__ ( self : Optional[Any], __lowercase : str ): lowercase__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A__ ( self : Optional[int], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A__ ( self : List[Any], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self : List[str], __lowercase : List[str], __lowercase : str, __lowercase : Optional[str], __lowercase : Optional[str], **__lowercase : Optional[int] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowercase__ = src_lang lowercase__ = self(__lowercase, add_special_tokens=__lowercase, return_tensors=__lowercase, **__lowercase ) lowercase__ = self.convert_tokens_to_ids(__lowercase ) lowercase__ = tgt_lang_id return inputs def A__ ( self : str, __lowercase : List[str], __lowercase : str = "eng_Latn", __lowercase : Optional[List[str]] = None, __lowercase : str = "fra_Latn", **__lowercase : Dict, ): lowercase__ = src_lang lowercase__ = tgt_lang return super().prepare_seqaseq_batch(__lowercase, __lowercase, **__lowercase ) def A__ ( self : Any ): return self.set_src_lang_special_tokens(self.src_lang ) def A__ ( self : Tuple ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A__ ( self : Dict, __lowercase : Any ): lowercase__ = self.convert_tokens_to_ids(__lowercase ) if self.legacy_behaviour: lowercase__ = [] lowercase__ = [self.eos_token_id, self.cur_lang_code] else: lowercase__ = [self.cur_lang_code] lowercase__ = [self.eos_token_id] lowercase__ = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase__ = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase__ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def A__ ( self : Any, __lowercase : str ): lowercase__ = self.convert_tokens_to_ids(__lowercase ) if self.legacy_behaviour: lowercase__ = [] lowercase__ = [self.eos_token_id, self.cur_lang_code] else: lowercase__ = [self.cur_lang_code] lowercase__ = [self.eos_token_id] lowercase__ = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase__ = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase__ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def A__ ( self : Optional[int], __lowercase : str, __lowercase : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return lowercase__ = os.path.join( __lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file, __lowercase ) return (out_vocab_file,)
720
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ , lowercase__ = len(SCREAMING_SNAKE_CASE_ ), len(grid[0] ) if ( min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) lowercase__ = 0 count += depth_first_search(SCREAMING_SNAKE_CASE_ , row + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , row - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col + 1 , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col - 1 , SCREAMING_SNAKE_CASE_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
37
0
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _snake_case ( lowercase__): def A__ ( self : Optional[Any], __lowercase : str ): with open(__lowercase, encoding="utf-8" ) as input_file: lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) lowercase__ = input_file.read() lowercase__ = regexp.search(__lowercase ) return match def A__ ( self : str, __lowercase : str ): with open(__lowercase, encoding="utf-8" ) as input_file: lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL ) lowercase__ = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` lowercase__ = regexp.finditer(__lowercase ) lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A__ ( self : Union[str, Any] ): lowercase__ = Path("./datasets" ) lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowercase ) ): raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' ) def A__ ( self : Union[str, Any] ): lowercase__ = Path("./datasets" ) lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowercase ) ): raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
721
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = 0 for ch in input_str: lowercase__ = ord(SCREAMING_SNAKE_CASE_ ) lowercase__ = pow(2 , SCREAMING_SNAKE_CASE_ ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
37
0
from __future__ import annotations def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> bool: UpperCamelCase : Optional[int] = get_failure_array(_lowerCAmelCase ) # 2) Step through text searching for pattern UpperCamelCase , UpperCamelCase : Optional[Any] = 0, 0 # index into text, pattern while i < len(_lowerCAmelCase ): if pattern[j] == text[i]: if j == (len(_lowerCAmelCase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: UpperCamelCase : str = failure[j - 1] continue i += 1 return False def A_ ( _lowerCAmelCase ) -> list[int]: UpperCamelCase : List[Any] = [0] UpperCamelCase : List[Any] = 0 UpperCamelCase : Union[str, Any] = 1 while j < len(_lowerCAmelCase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: UpperCamelCase : Optional[int] = failure[i - 1] continue j += 1 failure.append(_lowerCAmelCase ) return failure if __name__ == "__main__": # Test 1) __lowerCamelCase : Optional[Any] = """abc1abc12""" __lowerCamelCase : int = """alskfjaldsabc1abc1abc12k23adsfabcabc""" __lowerCamelCase : Tuple = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __lowerCamelCase : List[Any] = """ABABX""" __lowerCamelCase : Dict = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) __lowerCamelCase : Optional[int] = """AAAB""" __lowerCamelCase : Any = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) __lowerCamelCase : Optional[Any] = """abcdabcy""" __lowerCamelCase : Optional[Any] = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) __lowerCamelCase : str = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
38
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A_ ( ) -> Dict: UpperCamelCase : Tuple = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=_lowerCAmelCase , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=_lowerCAmelCase ) return parser.parse_args() def A_ ( ) -> Optional[int]: UpperCamelCase : Tuple = parse_args() # Import training_script as a module. UpperCamelCase : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCamelCase : List[Any] = script_fpath.stem UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase ) # Patch sys.argv UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
38
1
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class A__ : _UpperCAmelCase :List[str] _UpperCAmelCase :Optional[str] = None # Automatically constructed _UpperCAmelCase :ClassVar[str] = "dict" _UpperCAmelCase :ClassVar[Any] = None _UpperCAmelCase :str = field(default='Translation' , init=__snake_case , repr=__snake_case ) def __call__( self ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def __UpperCamelCase( self ): '''simple docstring''' from .features import Value return {k: Value("string" ) for k in sorted(self.languages )} @dataclass class A__ : _UpperCAmelCase :Optional[List] = None _UpperCAmelCase :Optional[int] = None _UpperCAmelCase :Optional[str] = None # Automatically constructed _UpperCAmelCase :ClassVar[str] = "dict" _UpperCAmelCase :ClassVar[Any] = None _UpperCAmelCase :str = field(default='TranslationVariableLanguages' , init=__snake_case , repr=__snake_case ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = sorted(set(self.languages ) ) if self.languages else None UpperCamelCase : str = len(self.languages ) if self.languages else None def __call__( self ): '''simple docstring''' return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = set(self.languages ) if self.languages and set(A_ ) - lang_set: raise ValueError( F"""Some languages in example ({", ".join(sorted(set(A_ ) - lang_set ) )}) are not in valid set ({", ".join(A_ )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. UpperCamelCase : List[str] = [] for lang, text in translation_dict.items(): if isinstance(A_ , A_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. UpperCamelCase , UpperCamelCase : Dict = zip(*sorted(A_ ) ) return {"language": languages, "translation": translations} def __UpperCamelCase( self ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("string" ) ), "translation": Sequence(Value("string" ) ), }
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Union[str, Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
from __future__ import annotations def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: # noqa: E741 while r - l > 1: UpperCamelCase : Tuple = (l + r) // 2 if v[m] >= key: UpperCamelCase : Dict = m else: UpperCamelCase : str = m # noqa: E741 return r def A_ ( _lowerCAmelCase ) -> int: if len(_lowerCAmelCase ) == 0: return 0 UpperCamelCase : Any = [0] * len(_lowerCAmelCase ) UpperCamelCase : List[Any] = 1 UpperCamelCase : List[str] = v[0] for i in range(1 , len(_lowerCAmelCase ) ): if v[i] < tail[0]: UpperCamelCase : Dict = v[i] elif v[i] > tail[length - 1]: UpperCamelCase : List[Any] = v[i] length += 1 else: UpperCamelCase : int = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
38
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18} UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18} UpperCamelCase : Optional[Any] = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : List[Any] = num_channels UpperCamelCase : Union[str, Any] = num_frames UpperCamelCase : Any = image_size UpperCamelCase : Tuple = min_resolution UpperCamelCase : Optional[Any] = max_resolution UpperCamelCase : Any = do_resize UpperCamelCase : Tuple = size UpperCamelCase : List[Any] = do_normalize UpperCamelCase : Optional[int] = image_mean UpperCamelCase : Any = image_std UpperCamelCase : str = crop_size def __UpperCamelCase( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = VivitImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
38
1
def A_ ( _lowerCAmelCase ) -> list[int]: UpperCamelCase : Optional[int] = [0 for i in range(len(_lowerCAmelCase ) )] # initialize interval's left pointer and right pointer UpperCamelCase , UpperCamelCase : Optional[Any] = 0, 0 for i in range(1 , len(_lowerCAmelCase ) ): # case when current index is inside the interval if i <= right_pointer: UpperCamelCase : str = min(right_pointer - i + 1 , z_result[i - left_pointer] ) UpperCamelCase : Any = min_edge while go_next(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: UpperCamelCase , UpperCamelCase : str = i, i + z_result[i] - 1 return z_result def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> bool: return i + z_result[i] < len(_lowerCAmelCase ) and s[z_result[i]] == s[i + z_result[i]] def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: UpperCamelCase : List[Any] = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string UpperCamelCase : int = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(_lowerCAmelCase ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
38
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __lowerCamelCase : Dict = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __lowerCamelCase : Tuple = { """facebook/blenderbot_small-90M""": 512, } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , ) UpperCamelCase : Union[str, Any] = add_prefix_space def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Tuple = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
38
1
import os import sys import unittest __lowerCamelCase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __lowerCamelCase : Optional[int] = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") __lowerCamelCase : List[Any] = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = get_test_to_tester_mapping(A_ ) UpperCamelCase : List[str] = get_test_to_tester_mapping(A_ ) UpperCamelCase : int = {"BertModelTest": "BertModelTester"} UpperCamelCase : List[Any] = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(A_ ) , A_ ) self.assertEqual(get_test_info.to_json(A_ ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = get_model_to_test_mapping(A_ ) UpperCamelCase : List[str] = get_model_to_test_mapping(A_ ) UpperCamelCase : List[str] = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } UpperCamelCase : int = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(A_ ) , A_ ) self.assertEqual(get_test_info.to_json(A_ ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = get_model_to_tester_mapping(A_ ) UpperCamelCase : Tuple = get_model_to_tester_mapping(A_ ) UpperCamelCase : Union[str, Any] = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } UpperCamelCase : Tuple = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(A_ ) , A_ ) self.assertEqual(get_test_info.to_json(A_ ) , A_ )
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : int = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class A__ ( __snake_case ): def __lt__( self , A_ ): '''simple docstring''' return self[-1] < other[-1] def __eq__( self , A_ ): '''simple docstring''' return self[-1] == other[-1] def A_ ( _lowerCAmelCase ) -> list: UpperCamelCase : list[Stack] = [] # sort into stacks for element in collection: UpperCamelCase : int = Stack([element] ) UpperCamelCase : Tuple = bisect_left(_lowerCAmelCase , _lowerCAmelCase ) if i != len(_lowerCAmelCase ): stacks[i].append(_lowerCAmelCase ) else: stacks.append(_lowerCAmelCase ) # use a heap-based merge to merge stack efficiently UpperCamelCase : List[Any] = merge(*(reversed(_lowerCAmelCase ) for stack in stacks) ) return collection if __name__ == "__main__": __lowerCamelCase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip() __lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(patience_sort(unsorted))
38
import logging import os import threading import time try: import warnings except ImportError: __lowerCamelCase : str = None try: import msvcrt except ImportError: __lowerCamelCase : str = None try: import fcntl except ImportError: __lowerCamelCase : List[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __lowerCamelCase : Union[str, Any] = OSError # Data # ------------------------------------------------ __lowerCamelCase : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] __lowerCamelCase : Union[str, Any] = """3.0.12""" __lowerCamelCase : Any = None def A_ ( ) -> List[Any]: global _logger UpperCamelCase : Any = _logger or logging.getLogger(__name__ ) return _logger class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = lock_file return None def __str__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.lock.release() return None class A__ : def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ ) # The path to the lock file. UpperCamelCase : List[Any] = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase : Tuple = None # The default timeout value. UpperCamelCase : Optional[Any] = timeout # We use this lock primarily for the lock counter. UpperCamelCase : Union[str, Any] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase : Dict = 0 return None @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file @property def __UpperCamelCase( self ): '''simple docstring''' return self._timeout @timeout.setter def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = float(A_ ) return None def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file_fd is not None def __UpperCamelCase( self , A_=None , A_=0.05 ): '''simple docstring''' if timeout is None: UpperCamelCase : Optional[Any] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 UpperCamelCase : Dict = id(self ) UpperCamelCase : List[str] = self._lock_file UpperCamelCase : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(A_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase( self , A_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase : List[Any] = id(self ) UpperCamelCase : Dict = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() UpperCamelCase : Dict = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=A_ ) return None def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = os.path.basename(A_ ) if len(A_ ) > max_length and max_length > 0: UpperCamelCase : Optional[int] = os.path.dirname(A_ ) UpperCamelCase : int = str(hash(A_ ) ) UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(A_ , A_ ) else: return path class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase : str = os.open(self._lock_file , A_ ) except OSError: pass else: try: msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(A_ ) else: UpperCamelCase : Optional[Any] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self._lock_file_fd UpperCamelCase : str = None msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 ) os.close(A_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. except OSError: pass return None class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase : int = os.open(self._lock_file , A_ ) try: fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(A_ ) else: UpperCamelCase : List[str] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self._lock_file_fd UpperCamelCase : List[Any] = None fcntl.flock(A_ , fcntl.LOCK_UN ) os.close(A_ ) return None class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase : Optional[int] = os.open(self._lock_file , A_ ) except OSError: pass else: UpperCamelCase : Tuple = fd return None def __UpperCamelCase( self ): '''simple docstring''' os.close(self._lock_file_fd ) UpperCamelCase : str = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __lowerCamelCase : Dict = None if msvcrt: __lowerCamelCase : Any = WindowsFileLock elif fcntl: __lowerCamelCase : Any = UnixFileLock else: __lowerCamelCase : int = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
38
1
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class A__ ( __snake_case ): def __get__( self , A_ , A_=None ): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) UpperCamelCase : Tuple = "__cached_" + self.fget.__name__ UpperCamelCase : Optional[Any] = getattr(A_ , A_ , A_ ) if cached is None: UpperCamelCase : List[str] = self.fget(A_ ) setattr(A_ , A_ , A_ ) return cached def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[int] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def A_ ( _lowerCAmelCase ) -> Dict: if is_torch_fx_proxy(_lowerCAmelCase ): return True if is_torch_available(): import torch if isinstance(_lowerCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_lowerCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_lowerCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(_lowerCAmelCase , np.ndarray ) def A_ ( _lowerCAmelCase ) -> Tuple: return isinstance(_lowerCAmelCase , np.ndarray ) def A_ ( _lowerCAmelCase ) -> Tuple: return _is_numpy(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Any: import torch return isinstance(_lowerCAmelCase , torch.Tensor ) def A_ ( _lowerCAmelCase ) -> Tuple: return False if not is_torch_available() else _is_torch(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Optional[int]: import torch return isinstance(_lowerCAmelCase , torch.device ) def A_ ( _lowerCAmelCase ) -> Any: return False if not is_torch_available() else _is_torch_device(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Union[str, Any]: import torch if isinstance(_lowerCAmelCase , _lowerCAmelCase ): if hasattr(_lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : str = getattr(_lowerCAmelCase , _lowerCAmelCase ) else: return False return isinstance(_lowerCAmelCase , torch.dtype ) def A_ ( _lowerCAmelCase ) -> str: return False if not is_torch_available() else _is_torch_dtype(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> str: import tensorflow as tf return isinstance(_lowerCAmelCase , tf.Tensor ) def A_ ( _lowerCAmelCase ) -> Optional[int]: return False if not is_tf_available() else _is_tensorflow(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Optional[int]: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_lowerCAmelCase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(_lowerCAmelCase ) return type(_lowerCAmelCase ) == tf.Tensor def A_ ( _lowerCAmelCase ) -> List[str]: return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> List[str]: import jax.numpy as jnp # noqa: F811 return isinstance(_lowerCAmelCase , jnp.ndarray ) def A_ ( _lowerCAmelCase ) -> Union[str, Any]: return False if not is_flax_available() else _is_jax(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Optional[int]: if isinstance(_lowerCAmelCase , (dict, UserDict) ): return {k: to_py_obj(_lowerCAmelCase ) for k, v in obj.items()} elif isinstance(_lowerCAmelCase , (list, tuple) ): return [to_py_obj(_lowerCAmelCase ) for o in obj] elif is_tf_tensor(_lowerCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(_lowerCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(_lowerCAmelCase ): return np.asarray(_lowerCAmelCase ).tolist() elif isinstance(_lowerCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A_ ( _lowerCAmelCase ) -> Dict: if isinstance(_lowerCAmelCase , (dict, UserDict) ): return {k: to_numpy(_lowerCAmelCase ) for k, v in obj.items()} elif isinstance(_lowerCAmelCase , (list, tuple) ): return np.array(_lowerCAmelCase ) elif is_tf_tensor(_lowerCAmelCase ): return obj.numpy() elif is_torch_tensor(_lowerCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(_lowerCAmelCase ): return np.asarray(_lowerCAmelCase ) else: return obj class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = fields(self ) # Safety and consistency checks if not len(A_ ): raise ValueError(F"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" ) UpperCamelCase : Any = getattr(self , class_fields[0].name ) UpperCamelCase : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(A_ ): if isinstance(A_ , A_ ): UpperCamelCase : str = first_field.items() UpperCamelCase : Dict = True else: try: UpperCamelCase : Optional[int] = iter(A_ ) UpperCamelCase : int = True except TypeError: UpperCamelCase : Optional[int] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(A_ ): if ( not isinstance(A_ , (list, tuple) ) or not len(A_ ) == 2 or not isinstance(element[0] , A_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCamelCase : Dict = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCamelCase : str = element[1] elif first_field is not None: UpperCamelCase : Optional[Any] = first_field else: for field in class_fields: UpperCamelCase : Optional[Any] = getattr(self , field.name ) if v is not None: UpperCamelCase : Dict = v def __delitem__( self , *A_ , **A_ ): '''simple docstring''' raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self , A_ ): '''simple docstring''' if isinstance(A_ , A_ ): UpperCamelCase : List[Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , A_ , A_ ): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(A_ , A_ ) super().__setattr__(A_ , A_ ) def __setitem__( self , A_ , A_ ): '''simple docstring''' super().__setitem__(A_ , A_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' return tuple(self[k] for k in self.keys() ) class A__ ( __snake_case , __snake_case ): @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' raise ValueError( F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class A__ ( __snake_case ): _UpperCAmelCase :str = 'longest' _UpperCAmelCase :Any = 'max_length' _UpperCAmelCase :str = 'do_not_pad' class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'pt' _UpperCAmelCase :List[Any] = 'tf' _UpperCAmelCase :List[str] = 'np' _UpperCAmelCase :List[Any] = 'jax' class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = context_managers UpperCamelCase : List[str] = ExitStack() def __enter__( self ): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(A_ ) def __exit__( self , *A_ , **A_ ): '''simple docstring''' self.stack.__exit__(*A_ , **A_ ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[Any] = infer_framework(_lowerCAmelCase ) if framework == "tf": UpperCamelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCamelCase : Union[str, Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCamelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : Dict = model_class.__name__ UpperCamelCase : int = infer_framework(_lowerCAmelCase ) if framework == "tf": UpperCamelCase : int = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCamelCase : str = inspect.signature(model_class.forward ) # PyTorch models else: UpperCamelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A_ ( _lowerCAmelCase , _lowerCAmelCase = "" , _lowerCAmelCase = "." ) -> Optional[int]: def _flatten_dict(_lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase="." ): for k, v in d.items(): UpperCamelCase : List[Any] = str(_lowerCAmelCase ) + delimiter + str(_lowerCAmelCase ) if parent_key else k if v and isinstance(_lowerCAmelCase , _lowerCAmelCase ): yield from flatten_dict(_lowerCAmelCase , _lowerCAmelCase , delimiter=_lowerCAmelCase ).items() else: yield key, v return dict(_flatten_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) @contextmanager def A_ ( _lowerCAmelCase , _lowerCAmelCase = False ) -> List[Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A_ ( _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]: if is_numpy_array(_lowerCAmelCase ): return np.transpose(_lowerCAmelCase , axes=_lowerCAmelCase ) elif is_torch_tensor(_lowerCAmelCase ): return array.T if axes is None else array.permute(*_lowerCAmelCase ) elif is_tf_tensor(_lowerCAmelCase ): import tensorflow as tf return tf.transpose(_lowerCAmelCase , perm=_lowerCAmelCase ) elif is_jax_tensor(_lowerCAmelCase ): return jnp.transpose(_lowerCAmelCase , axes=_lowerCAmelCase ) else: raise ValueError(F"""Type not supported for transpose: {type(_lowerCAmelCase )}.""" ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: if is_numpy_array(_lowerCAmelCase ): return np.reshape(_lowerCAmelCase , _lowerCAmelCase ) elif is_torch_tensor(_lowerCAmelCase ): return array.reshape(*_lowerCAmelCase ) elif is_tf_tensor(_lowerCAmelCase ): import tensorflow as tf return tf.reshape(_lowerCAmelCase , _lowerCAmelCase ) elif is_jax_tensor(_lowerCAmelCase ): return jnp.reshape(_lowerCAmelCase , _lowerCAmelCase ) else: raise ValueError(F"""Type not supported for reshape: {type(_lowerCAmelCase )}.""" ) def A_ ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str: if is_numpy_array(_lowerCAmelCase ): return np.squeeze(_lowerCAmelCase , axis=_lowerCAmelCase ) elif is_torch_tensor(_lowerCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=_lowerCAmelCase ) elif is_tf_tensor(_lowerCAmelCase ): import tensorflow as tf return tf.squeeze(_lowerCAmelCase , axis=_lowerCAmelCase ) elif is_jax_tensor(_lowerCAmelCase ): return jnp.squeeze(_lowerCAmelCase , axis=_lowerCAmelCase ) else: raise ValueError(F"""Type not supported for squeeze: {type(_lowerCAmelCase )}.""" ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: if is_numpy_array(_lowerCAmelCase ): return np.expand_dims(_lowerCAmelCase , _lowerCAmelCase ) elif is_torch_tensor(_lowerCAmelCase ): return array.unsqueeze(dim=_lowerCAmelCase ) elif is_tf_tensor(_lowerCAmelCase ): import tensorflow as tf return tf.expand_dims(_lowerCAmelCase , axis=_lowerCAmelCase ) elif is_jax_tensor(_lowerCAmelCase ): return jnp.expand_dims(_lowerCAmelCase , axis=_lowerCAmelCase ) else: raise ValueError(F"""Type not supported for expand_dims: {type(_lowerCAmelCase )}.""" ) def A_ ( _lowerCAmelCase ) -> List[str]: if is_numpy_array(_lowerCAmelCase ): return np.size(_lowerCAmelCase ) elif is_torch_tensor(_lowerCAmelCase ): return array.numel() elif is_tf_tensor(_lowerCAmelCase ): import tensorflow as tf return tf.size(_lowerCAmelCase ) elif is_jax_tensor(_lowerCAmelCase ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(_lowerCAmelCase )}.""" ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: for key, value in auto_map.items(): if isinstance(_lowerCAmelCase , (tuple, list) ): UpperCamelCase : Any = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: UpperCamelCase : List[Any] = F"""{repo_id}--{value}""" return auto_map def A_ ( _lowerCAmelCase ) -> int: for base_class in inspect.getmro(_lowerCAmelCase ): UpperCamelCase : List[str] = base_class.__module__ UpperCamelCase : Any = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
38
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> str: if config_name_or_path is None: UpperCamelCase : Dict = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: UpperCamelCase : Tuple = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: UpperCamelCase : Tuple = question_encoder_name_or_path UpperCamelCase : Any = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. UpperCamelCase : Optional[Any] = RagConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Tuple = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : int = gen_config UpperCamelCase : Dict = question_encoder_config UpperCamelCase : Tuple = model_class.from_pretrained_question_encoder_generator( _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase ) rag_model.save_pretrained(_lowerCAmelCase ) # Sanity check. model_class.from_pretrained(_lowerCAmelCase ) # Save tokenizers. UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) __lowerCamelCase : Dict = parser.parse_args() __lowerCamelCase : Dict = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
38
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : str = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'camembert' def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : List[str] = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : str = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = use_cache UpperCamelCase : List[str] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
38
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : Dict = parent UpperCamelCase : str = 13 UpperCamelCase : int = 7 UpperCamelCase : str = True UpperCamelCase : Dict = True UpperCamelCase : str = True UpperCamelCase : Tuple = True UpperCamelCase : List[str] = 99 UpperCamelCase : Optional[Any] = 384 UpperCamelCase : Tuple = 2 UpperCamelCase : Union[str, Any] = 4 UpperCamelCase : Dict = 37 UpperCamelCase : Any = "gelu" UpperCamelCase : List[Any] = 0.1 UpperCamelCase : int = 0.1 UpperCamelCase : Tuple = 512 UpperCamelCase : List[Any] = 16 UpperCamelCase : int = 2 UpperCamelCase : Dict = 0.02 UpperCamelCase : Optional[Any] = 3 UpperCamelCase : List[Any] = 4 UpperCamelCase : Dict = 128 UpperCamelCase : Optional[Any] = 2 UpperCamelCase : Optional[int] = 9 UpperCamelCase : Optional[int] = 1 UpperCamelCase : Union[str, Any] = None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : str = None if self.use_input_mask: UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None if self.use_token_type_ids: UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Optional[int] = None UpperCamelCase : Optional[int] = None UpperCamelCase : List[Any] = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Any = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel(config=A_ ) UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Optional[int] = [input_ids, input_mask] UpperCamelCase : Any = model(A_ ) UpperCamelCase : int = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ ) UpperCamelCase : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = self.num_choices UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ ) UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ ) UpperCamelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Dict = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase :Any = False _UpperCAmelCase :int = False _UpperCAmelCase :str = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = TFConvBertModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Optional[Any] = True UpperCamelCase : Any = True if hasattr(A_ , "use_cache" ): UpperCamelCase : List[str] = True UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ ) for model_class in self.all_model_classes: UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ ) UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Optional[int] = len(model(A_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_ ) UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" ) UpperCamelCase : Dict = tf.keras.models.load_model(A_ ) UpperCamelCase : str = model(A_ ) if self.is_encoder_decoder: UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"] UpperCamelCase : Any = outputs["encoder_attentions"] else: UpperCamelCase : Any = outputs["hidden_states"] UpperCamelCase : List[str] = outputs["attentions"] self.assertEqual(len(A_ ) , A_ ) UpperCamelCase : int = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A_ ) , A_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Dict = True UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ ) UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ ) def check_decoder_attentions_output(A_ ): UpperCamelCase : Optional[Any] = len(A_ ) self.assertEqual(out_len % 2 , 0 ) UpperCamelCase : Any = outputs.decoder_attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(A_ ): UpperCamelCase : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True UpperCamelCase : List[Any] = False UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) UpperCamelCase : List[str] = len(A_ ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) if self.is_encoder_decoder: UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_decoder_attentions_output(A_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCamelCase : Tuple = True UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) # Check attention is always last and order is fine UpperCamelCase : Optional[int] = True UpperCamelCase : List[str] = True UpperCamelCase : Optional[int] = model_class(A_ ) UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) ) self.assertEqual(model.config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) @require_tf class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase : List[str] = model(A_ )[0] UpperCamelCase : int = [1, 6, 768] self.assertEqual(output.shape , A_ ) UpperCamelCase : List[str] = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
38
1
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __lowerCamelCase : Dict = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __lowerCamelCase : Tuple = { """facebook/blenderbot_small-90M""": 512, } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , ) UpperCamelCase : Union[str, Any] = add_prefix_space def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Tuple = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
38
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : str = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'camembert' def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : List[str] = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : str = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = use_cache UpperCamelCase : List[str] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
38
1
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer __lowerCamelCase : Optional[int] = logging.get_logger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Optional[Any] = 'AutoTokenizer' _UpperCAmelCase :Tuple = ['tokenizer'] _UpperCAmelCase :int = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self , A_ , A_=None ): '''simple docstring''' super().__init__(A_ ) UpperCamelCase : List[Any] = speaker_embeddings @classmethod def __UpperCamelCase( cls , A_ , A_="speaker_embeddings_path.json" , **A_ ): '''simple docstring''' if speaker_embeddings_dict_path is not None: UpperCamelCase : Any = get_file_from_repo( A_ , A_ , subfolder=kwargs.pop("subfolder" , A_ ) , cache_dir=kwargs.pop("cache_dir" , A_ ) , force_download=kwargs.pop("force_download" , A_ ) , proxies=kwargs.pop("proxies" , A_ ) , resume_download=kwargs.pop("resume_download" , A_ ) , local_files_only=kwargs.pop("local_files_only" , A_ ) , use_auth_token=kwargs.pop("use_auth_token" , A_ ) , revision=kwargs.pop("revision" , A_ ) , ) if speaker_embeddings_path is None: logger.warning( F"""`{os.path.join(A_ , A_ )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" ) UpperCamelCase : Optional[int] = None else: with open(A_ ) as speaker_embeddings_json: UpperCamelCase : Optional[Any] = json.load(A_ ) else: UpperCamelCase : Any = None UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(A_ , **A_ ) return cls(tokenizer=A_ , speaker_embeddings=A_ ) def __UpperCamelCase( self , A_ , A_="speaker_embeddings_path.json" , A_="speaker_embeddings" , A_ = False , **A_ , ): '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(A_ , A_ , "v2" ) , exist_ok=A_ ) UpperCamelCase : Dict = {} UpperCamelCase : int = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": UpperCamelCase : Any = self._load_voice_preset(A_ ) UpperCamelCase : List[Any] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["repo_or_path"] , A_ , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=A_ , ) UpperCamelCase : Any = os.path.join(A_ , F"""{prompt_key}_{key}.npy""" ) UpperCamelCase : List[Any] = tmp_dict with open(os.path.join(A_ , A_ ) , "w" ) as fp: json.dump(A_ , A_ ) super().save_pretrained(A_ , A_ , **A_ ) def __UpperCamelCase( self , A_ = None , **A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = self.speaker_embeddings[voice_preset] UpperCamelCase : Dict = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" ) UpperCamelCase : int = get_file_from_repo( self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , A_ ) , cache_dir=kwargs.pop("cache_dir" , A_ ) , force_download=kwargs.pop("force_download" , A_ ) , proxies=kwargs.pop("proxies" , A_ ) , resume_download=kwargs.pop("resume_download" , A_ ) , local_files_only=kwargs.pop("local_files_only" , A_ ) , use_auth_token=kwargs.pop("use_auth_token" , A_ ) , revision=kwargs.pop("revision" , A_ ) , ) if path is None: raise ValueError( F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.""" ) UpperCamelCase : Dict = np.load(A_ ) return voice_preset_dict def __UpperCamelCase( self , A_ = None ): '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) def __call__( self , A_=None , A_=None , A_="pt" , A_=256 , A_=False , A_=True , A_=False , **A_ , ): '''simple docstring''' if voice_preset is not None and not isinstance(A_ , A_ ): if ( isinstance(A_ , A_ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): UpperCamelCase : Union[str, Any] = self._load_voice_preset(A_ ) else: if isinstance(A_ , A_ ) and not voice_preset.endswith(".npz" ): UpperCamelCase : Any = voice_preset + ".npz" UpperCamelCase : Optional[int] = np.load(A_ ) if voice_preset is not None: self._validate_voice_preset_dict(A_ , **A_ ) UpperCamelCase : Optional[int] = BatchFeature(data=A_ , tensor_type=A_ ) UpperCamelCase : Tuple = self.tokenizer( A_ , return_tensors=A_ , padding="max_length" , max_length=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , add_special_tokens=A_ , **A_ , ) if voice_preset is not None: UpperCamelCase : Tuple = voice_preset return encoded_text
38
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return int(input_a == input_a == 0 ) def A_ ( ) -> None: print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger(__name__) def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Union[str, Any]: UpperCamelCase : Any = "backbone." if is_semantic else "" UpperCamelCase : Tuple = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ (F"""{prefix}cls_token""", "beit.embeddings.cls_token"), (F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"), (F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"), (F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("mask_token", "beit.embeddings.mask_token"), ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) else: # layernorm + classification head rename_keys.extend( [ ("fc_norm.weight", "beit.pooler.layernorm.weight"), ("fc_norm.bias", "beit.pooler.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> int: for i in range(config.num_hidden_layers ): UpperCamelCase : Any = "backbone." if is_semantic else "" # queries, keys and values UpperCamelCase : List[Any] = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" ) UpperCamelCase : List[str] = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" ) UpperCamelCase : Union[str, Any] = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" ) UpperCamelCase : Any = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase : List[str] = q_bias UpperCamelCase : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase : Optional[int] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase : List[str] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCamelCase : str = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" ) UpperCamelCase : Dict = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" ) UpperCamelCase : Optional[Any] = gamma_a UpperCamelCase : str = gamma_a def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : str = dct.pop(_lowerCAmelCase ) UpperCamelCase : int = val def A_ ( ) -> int: UpperCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: UpperCamelCase : Dict = False if "rvlcdip" in checkpoint_url else True UpperCamelCase : List[str] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCamelCase : Optional[Any] = 1024 UpperCamelCase : int = 4096 UpperCamelCase : Tuple = 24 UpperCamelCase : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCamelCase : str = 16 UpperCamelCase : Optional[Any] = "huggingface/label-files" UpperCamelCase : Optional[int] = "rvlcdip-id2label.json" UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) UpperCamelCase : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} UpperCamelCase : Dict = idalabel UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCamelCase : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"] UpperCamelCase : Dict = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase ) # load HuggingFace model UpperCamelCase : Dict = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase ) model.eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image UpperCamelCase : Tuple = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase ) UpperCamelCase : int = prepare_img() UpperCamelCase : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors="pt" ) UpperCamelCase : Tuple = encoding["pixel_values"] UpperCamelCase : Optional[Any] = model(_lowerCAmelCase ) UpperCamelCase : List[Any] = outputs.logits # verify logits UpperCamelCase : Tuple = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: if has_lm_head: UpperCamelCase : List[str] = "dit-base" if "base" in checkpoint_url else "dit-large" else: UpperCamelCase : Optional[int] = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip" image_processor.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , ) if __name__ == "__main__": __lowerCamelCase : str = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) __lowerCamelCase : List[str] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
38
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer'] _UpperCAmelCase :Tuple = 'BlipImageProcessor' _UpperCAmelCase :Optional[int] = 'AutoTokenizer' def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = False super().__init__(A_ , A_ ) UpperCamelCase : str = self.image_processor def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ): '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: UpperCamelCase : int = self.tokenizer UpperCamelCase : Optional[int] = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) return text_encoding # add pixel_values UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ ) if text is not None: UpperCamelCase : Dict = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) else: UpperCamelCase : Dict = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.batch_decode(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.decode(*A_ , **A_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.tokenizer.model_input_names UpperCamelCase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
38
1
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCamelCase : Optional[int] = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = {"""vocab_file""": """vocab.json"""} __lowerCamelCase : List[str] = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } __lowerCamelCase : Any = {"""mgp-str""": 27} class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase :List[str] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , A_ , A_="[GO]" , A_="[GO]" , A_="[s]" , A_="[GO]" , **A_ ): '''simple docstring''' super().__init__( unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ , ) with open(A_ , encoding="utf-8" ) as vocab_handle: UpperCamelCase : List[str] = json.load(A_ ) UpperCamelCase : Tuple = {v: k for k, v in self.vocab.items()} @property def __UpperCamelCase( self ): '''simple docstring''' return len(self.vocab ) def __UpperCamelCase( self ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Any = [] for s in text: char_tokens.extend(A_ ) return char_tokens def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.vocab.get(A_ , self.vocab.get(self.unk_token ) ) def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.decoder.get(A_ ) def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if not os.path.isdir(A_ ): logger.error("Vocabulary path ({}) should be a directory".format(A_ ) ) return UpperCamelCase : Optional[int] = os.path.join( A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(A_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + "\n" ) return (vocab_file,)
38
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCamelCase : Dict = logging.get_logger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask'] def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ): '''simple docstring''' super().__init__( feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , ) UpperCamelCase : Optional[int] = spectrogram_length UpperCamelCase : Dict = num_channels UpperCamelCase : Optional[Any] = patch_size UpperCamelCase : str = feature_size // self.patch_size[1] UpperCamelCase : List[str] = n_fft UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate UpperCamelCase : Optional[int] = sampling_rate UpperCamelCase : int = padding_value UpperCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = spectrogram( A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , ) UpperCamelCase : List[Any] = log_spec[:, :-1] UpperCamelCase : Optional[int] = log_spec - 20.0 UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) UpperCamelCase : Union[str, Any] = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase : List[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCamelCase : str = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A_ ): UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCamelCase : List[str] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCamelCase : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa ) # convert into correct format for padding UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCamelCase : List[str] = padded_audio_features * self.padding_value for i in range(len(A_ ) ): UpperCamelCase : Union[str, Any] = audio_features[i] UpperCamelCase : Optional[int] = feature # return as BatchFeature if return_attention_mask: UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: UpperCamelCase : int = {"audio_values": padded_audio_features} UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ ) return encoded_inputs
38
1
from __future__ import annotations __lowerCamelCase : Optional[int] = """Muhammad Umer Farooq""" __lowerCamelCase : Tuple = """MIT""" __lowerCamelCase : Optional[int] = """1.0.0""" __lowerCamelCase : int = """Muhammad Umer Farooq""" __lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me""" __lowerCamelCase : Dict = """Alpha""" import re from html.parser import HTMLParser from urllib import parse import requests class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' super().__init__() UpperCamelCase : list[str] = [] UpperCamelCase : str = domain def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: UpperCamelCase : Any = parse.urljoin(self.domain , A_ ) self.urls.append(A_ ) def A_ ( _lowerCAmelCase ) -> str: return ".".join(get_sub_domain_name(_lowerCAmelCase ).split("." )[-2:] ) def A_ ( _lowerCAmelCase ) -> str: return parse.urlparse(_lowerCAmelCase ).netloc def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]: UpperCamelCase : int = get_domain_name(_lowerCAmelCase ) # Initialize the parser UpperCamelCase : str = Parser(_lowerCAmelCase ) try: # Open URL UpperCamelCase : int = requests.get(_lowerCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through UpperCamelCase : Optional[Any] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase ) # Get the valid email. UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(_lowerCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Tuple = emails_from_url("""https://github.com""") print(f"""{len(emails)} emails found:""") print("""\n""".join(sorted(emails)))
38
from __future__ import annotations from random import random from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar("""KT""") __lowerCamelCase : Dict = TypeVar("""VT""") class A__ ( Generic[KT, VT] ): def __init__( self , A_ = "root" , A_ = None ): '''simple docstring''' UpperCamelCase : int = key UpperCamelCase : List[Any] = value UpperCamelCase : list[Node[KT, VT]] = [] def __repr__( self ): '''simple docstring''' return F"""Node({self.key}: {self.value})""" @property def __UpperCamelCase( self ): '''simple docstring''' return len(self.forward ) class A__ ( Generic[KT, VT] ): def __init__( self , A_ = 0.5 , A_ = 16 ): '''simple docstring''' UpperCamelCase : Node[KT, VT] = Node[KT, VT]() UpperCamelCase : List[Any] = 0 UpperCamelCase : Union[str, Any] = p UpperCamelCase : List[str] = max_level def __str__( self ): '''simple docstring''' UpperCamelCase : int = list(self ) if len(A_ ) == 0: return F"""SkipList(level={self.level})""" UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 ) UpperCamelCase : Dict = max(A_ , 4 ) + 4 UpperCamelCase : str = self.head UpperCamelCase : List[Any] = [] UpperCamelCase : int = node.forward.copy() lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) ) lines.append(" " * label_size + "| " * len(A_ ) ) while len(node.forward ) != 0: UpperCamelCase : Union[str, Any] = node.forward[0] lines.append( F"""[{node.key}]""".ljust(A_ , "-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(A_ ) ) UpperCamelCase : Tuple = node.forward lines.append("None".ljust(A_ ) + "* " * len(A_ ) ) return F"""SkipList(level={self.level})\n""" + "\n".join(A_ ) def __iter__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.head while len(node.forward ) != 0: yield node.forward[0].key UpperCamelCase : Union[str, Any] = node.forward[0] def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = 1 while random() < self.p and level < self.max_level: level += 1 return level def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = [] UpperCamelCase : List[Any] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: UpperCamelCase : str = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(A_ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ ) if node is not None: for i, update_node in enumerate(A_ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: UpperCamelCase : Tuple = node.forward[i] else: UpperCamelCase : List[Any] = update_node.forward[:i] def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ ) if node is not None: UpperCamelCase : Union[str, Any] = value else: UpperCamelCase : Dict = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , A_ ): update_vector.append(self.head ) UpperCamelCase : Optional[int] = level UpperCamelCase : Dict = Node(A_ , A_ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(A_ ) else: UpperCamelCase : List[Any] = new_node def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ ) if node is not None: return node.value return None def A_ ( ) -> List[Any]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 12 ) skip_list.insert("Key3" , 41 ) skip_list.insert("Key4" , -19 ) UpperCamelCase : Optional[int] = skip_list.head UpperCamelCase : List[str] = {} while node.level != 0: UpperCamelCase : str = node.forward[0] UpperCamelCase : Optional[int] = node.value assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def A_ ( ) -> List[Any]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key1" , 10 ) skip_list.insert("Key1" , 12 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 10 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) skip_list.insert("Key10" , 10 ) UpperCamelCase : Dict = skip_list.head UpperCamelCase : Tuple = {} while node.level != 0: UpperCamelCase : List[str] = node.forward[0] UpperCamelCase : Dict = node.value if len(_lowerCAmelCase ) != 4: print() assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() assert skip_list.find("Some key" ) is None def A_ ( ) -> Tuple: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key2" , 20 ) assert skip_list.find("Key2" ) == 20 skip_list.insert("Some Key" , 10 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 13 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 10 assert skip_list.find("V" ) == 13 def A_ ( ) -> Dict: UpperCamelCase : Optional[int] = SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def A_ ( ) -> Dict: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[str]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 14 assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("Key1" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 15 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 142 ) skip_list.insert("Key2" , 15 ) skip_list.delete("X" ) def traverse_keys(_lowerCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_lowerCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def A_ ( ) -> Union[str, Any]: def is_sorted(_lowerCAmelCase ): return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) ) UpperCamelCase : int = SkipList() for i in range(10 ): skip_list.insert(_lowerCAmelCase , _lowerCAmelCase ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_lowerCAmelCase ) ) def A_ ( ) -> Tuple: for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A_ ( _lowerCAmelCase = "laptop" ) -> DataFrame: UpperCamelCase : Union[str, Any] = F"""https://www.amazon.in/laptop/s?k={product}""" UpperCamelCase : List[str] = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } UpperCamelCase : List[Any] = BeautifulSoup(requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).text ) # Initialize a Pandas dataframe with the column titles UpperCamelCase : List[str] = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: UpperCamelCase : Union[str, Any] = item.ha.text UpperCamelCase : str = "https://www.amazon.in/" + item.ha.a["href"] UpperCamelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text try: UpperCamelCase : Tuple = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: UpperCamelCase : Union[str, Any] = "Not available" try: UpperCamelCase : Optional[int] = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: UpperCamelCase : int = "" try: UpperCamelCase : Any = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: UpperCamelCase : int = float("nan" ) except AttributeError: pass UpperCamelCase : List[Any] = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] UpperCamelCase : int = " " UpperCamelCase : Union[str, Any] = " " data_frame.index += 1 return data_frame if __name__ == "__main__": __lowerCamelCase : Tuple = """headphones""" get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
38
from PIL import Image def A_ ( _lowerCAmelCase ) -> Image: UpperCamelCase , UpperCamelCase : List[Any] = image.size UpperCamelCase : Union[str, Any] = 0 UpperCamelCase : List[str] = image.load() for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): UpperCamelCase : List[Any] = pixels[j, i] mean += pixel mean //= width * height for j in range(_lowerCAmelCase ): for i in range(_lowerCAmelCase ): UpperCamelCase : Union[str, Any] = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
38
1
import argparse import os import re __lowerCamelCase : Any = """src/diffusers""" # Pattern that looks at the indentation in a line. __lowerCamelCase : Optional[int] = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. __lowerCamelCase : Optional[Any] = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __lowerCamelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. __lowerCamelCase : Optional[Any] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __lowerCamelCase : List[Any] = re.compile(r"""\[([^\]]+)\]""") def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : List[str] = _re_indent.search(_lowerCAmelCase ) return "" if search is None else search.groups()[0] def A_ ( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Any: UpperCamelCase : Optional[int] = 0 UpperCamelCase : Any = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(_lowerCAmelCase ): index += 1 UpperCamelCase : Tuple = ["\n".join(lines[:index] )] else: UpperCamelCase : Any = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). UpperCamelCase : Optional[Any] = [lines[index]] index += 1 while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(_lowerCAmelCase ) ) if index < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: UpperCamelCase : Optional[Any] = [] else: blocks.append("\n".join(_lowerCAmelCase ) ) UpperCamelCase : Optional[int] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_lowerCAmelCase ) > 0: blocks.append("\n".join(_lowerCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_lowerCAmelCase ): blocks.append("\n".join(lines[index:] ) ) return blocks def A_ ( _lowerCAmelCase ) -> Any: def _inner(_lowerCAmelCase ): return key(_lowerCAmelCase ).lower().replace("_" , "" ) return _inner def A_ ( _lowerCAmelCase , _lowerCAmelCase=None ) -> List[Any]: # If no key is provided, we use a noop. def noop(_lowerCAmelCase ): return x if key is None: UpperCamelCase : Optional[Any] = noop # Constants are all uppercase, they go first. UpperCamelCase : Optional[Any] = [obj for obj in objects if key(_lowerCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. UpperCamelCase : int = [obj for obj in objects if key(_lowerCAmelCase )[0].isupper() and not key(_lowerCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. UpperCamelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCAmelCase )[0].isupper()] UpperCamelCase : List[str] = ignore_underscore(_lowerCAmelCase ) return sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Any: # This inner function sort imports between [ ]. def _replace(_lowerCAmelCase ): UpperCamelCase : Any = match.groups()[0] if "," not in imports: return F"""[{imports}]""" UpperCamelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCamelCase : Tuple = keys[:-1] return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCAmelCase )] ) + "]" UpperCamelCase : Optional[int] = import_statement.split("\n" ) if len(_lowerCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. UpperCamelCase : List[Any] = 2 if lines[1].strip() == "[" else 1 UpperCamelCase : Tuple = [(i, _re_strip_line.search(_lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] UpperCamelCase : int = sort_objects(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] ) UpperCamelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_lowerCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: UpperCamelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] ) else: UpperCamelCase : Any = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCamelCase : Union[str, Any] = keys[:-1] UpperCamelCase : Union[str, Any] = get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCAmelCase )] ) return "\n".join(_lowerCAmelCase ) else: # Finally we have to deal with imports fitting on one line UpperCamelCase : Tuple = _re_bracket_content.sub(_replace , _lowerCAmelCase ) return import_statement def A_ ( _lowerCAmelCase , _lowerCAmelCase=True ) -> Any: with open(_lowerCAmelCase , "r" ) as f: UpperCamelCase : List[str] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 UpperCamelCase : int = split_code_in_indented_blocks( _lowerCAmelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_lowerCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. UpperCamelCase : Optional[int] = main_blocks[block_idx] UpperCamelCase : List[str] = block.split("\n" ) # Get to the start of the imports. UpperCamelCase : List[Any] = 0 while line_idx < len(_lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: UpperCamelCase : int = len(_lowerCAmelCase ) else: line_idx += 1 if line_idx >= len(_lowerCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. UpperCamelCase : Any = "\n".join(block_lines[line_idx:-1] ) UpperCamelCase : Optional[int] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. UpperCamelCase : Union[str, Any] = split_code_in_indented_blocks(_lowerCAmelCase , indent_level=_lowerCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend UpperCamelCase : Dict = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. UpperCamelCase : List[Any] = [(pattern.search(_lowerCAmelCase ).groups()[0] if pattern.search(_lowerCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. UpperCamelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCAmelCase ) if key is not None] UpperCamelCase : int = [x[0] for x in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. UpperCamelCase : List[Any] = 0 UpperCamelCase : Optional[int] = [] for i in range(len(_lowerCAmelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: UpperCamelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_lowerCAmelCase ) count += 1 # And we put our main block back together with its first and last line. UpperCamelCase : Tuple = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_lowerCAmelCase ): if check_only: return True else: print(F"""Overwriting {file}.""" ) with open(_lowerCAmelCase , "w" ) as f: f.write("\n".join(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase=True ) -> Optional[int]: UpperCamelCase : List[Any] = [] for root, _, files in os.walk(_lowerCAmelCase ): if "__init__.py" in files: UpperCamelCase : str = sort_imports(os.path.join(_lowerCAmelCase , "__init__.py" ) , check_only=_lowerCAmelCase ) if result: UpperCamelCase : str = [os.path.join(_lowerCAmelCase , "__init__.py" )] if len(_lowerCAmelCase ) > 0: raise ValueError(F"""Would overwrite {len(_lowerCAmelCase )} files, run `make style`.""" ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") __lowerCamelCase : int = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
38
from math import loga def A_ ( _lowerCAmelCase ) -> int: if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise TypeError("Input value must be a 'int' type" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
38
1
import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __lowerCamelCase : Any = """\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } """ __lowerCamelCase : Optional[int] = """\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy. """ __lowerCamelCase : Any = r""" Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") Examples: >>> metric = datasets.load_metric(\"competition_math\") >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"]) >>> print(results) {'accuracy': 1.0} """ @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = 0.0 for i, j in zip(A_ , A_ ): n_correct += 1.0 if math_equivalence.is_equiv(A_ , A_ ) else 0.0 UpperCamelCase : Optional[Any] = n_correct / len(A_ ) return { "accuracy": accuracy, }
38
from __future__ import annotations __lowerCamelCase : Optional[int] = """Muhammad Umer Farooq""" __lowerCamelCase : Tuple = """MIT""" __lowerCamelCase : Optional[int] = """1.0.0""" __lowerCamelCase : int = """Muhammad Umer Farooq""" __lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me""" __lowerCamelCase : Dict = """Alpha""" import re from html.parser import HTMLParser from urllib import parse import requests class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' super().__init__() UpperCamelCase : list[str] = [] UpperCamelCase : str = domain def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: UpperCamelCase : Any = parse.urljoin(self.domain , A_ ) self.urls.append(A_ ) def A_ ( _lowerCAmelCase ) -> str: return ".".join(get_sub_domain_name(_lowerCAmelCase ).split("." )[-2:] ) def A_ ( _lowerCAmelCase ) -> str: return parse.urlparse(_lowerCAmelCase ).netloc def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]: UpperCamelCase : int = get_domain_name(_lowerCAmelCase ) # Initialize the parser UpperCamelCase : str = Parser(_lowerCAmelCase ) try: # Open URL UpperCamelCase : int = requests.get(_lowerCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through UpperCamelCase : Optional[Any] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase ) # Get the valid email. UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(_lowerCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Tuple = emails_from_url("""https://github.com""") print(f"""{len(emails)} emails found:""") print("""\n""".join(sorted(emails)))
38
1
from dataclasses import dataclass, field from typing import Optional @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} ) _UpperCAmelCase :Optional[str] = field( default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} ) _UpperCAmelCase :Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} ) _UpperCAmelCase :Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} ) _UpperCAmelCase :Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} ) _UpperCAmelCase :Optional[int] = field( default=1_0_0_0_0 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} ) _UpperCAmelCase :Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} ) _UpperCAmelCase :Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} ) _UpperCAmelCase :Optional[int] = field( default=7_5_0 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} ) _UpperCAmelCase :Optional[int] = field( default=1_6 , metadata={'help': 'Number of gradient accumulation steps.'} ) _UpperCAmelCase :Optional[bool] = field( default=__snake_case , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} ) _UpperCAmelCase :Optional[int] = field(default=5_0_0_0_0 , metadata={'help': 'Maximum number of training steps.'} ) _UpperCAmelCase :Optional[int] = field( default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} ) _UpperCAmelCase :Optional[int] = field(default=1_0_2_4 , metadata={'help': 'Sequence lengths used for training.'} ) _UpperCAmelCase :Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} ) _UpperCAmelCase :Optional[int] = field( default=1_0_2_4 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} ) _UpperCAmelCase :Optional[bool] = field(default=__snake_case , metadata={'help': 'If True the data is pretokenized.'} ) @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} ) _UpperCAmelCase :Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} ) _UpperCAmelCase :Optional[int] = field( default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} ) _UpperCAmelCase :Optional[int] = field(default=1_0_2_4 , metadata={'help': 'Length of sequences to be evaluated.'} ) _UpperCAmelCase :Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} ) @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} ) _UpperCAmelCase :Optional[int] = field(default=__snake_case , metadata={'help': 'Number of workers used for code evaluation.'} ) _UpperCAmelCase :Optional[int] = field( default=__snake_case , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , ) _UpperCAmelCase :Optional[bool] = field( default=__snake_case , metadata={'help': 'Sample from the language model\'s output distribution.'} ) _UpperCAmelCase :Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} ) _UpperCAmelCase :Optional[int] = field(default=2_5_6 , metadata={'help': 'Maximum number of newly generated tokens.'} ) _UpperCAmelCase :Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} ) _UpperCAmelCase :Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} ) _UpperCAmelCase :Optional[int] = field(default=1_0 , metadata={'help': 'Number of generations to run in parallel.'} ) _UpperCAmelCase :Optional[int] = field( default=2_0_0 , metadata={'help': 'Number of completions to generate for each sample.'} ) _UpperCAmelCase :Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} ) _UpperCAmelCase :Optional[str] = field( default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} ) _UpperCAmelCase :Optional[str] = field( default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} ) _UpperCAmelCase :Optional[int] = field( default=-1 , metadata={ 'help': ( 'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive' ' number corresponds to which GPU device id to run on.' ) } , ) @dataclass class A__ : _UpperCAmelCase :Optional[int] = field( default=__snake_case , metadata={ 'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.' } , ) _UpperCAmelCase :Optional[str] = field( default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} ) _UpperCAmelCase :Optional[int] = field( default=1_0_0_0_0_0 , metadata={'help': 'Number of files to save per JSON output file.'} ) _UpperCAmelCase :Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} ) _UpperCAmelCase :Optional[float] = field( default=1_0_0_0 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} ) _UpperCAmelCase :Optional[float] = field( default=1_0_0 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} ) _UpperCAmelCase :Optional[float] = field( default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} ) _UpperCAmelCase :Optional[float] = field( default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} ) _UpperCAmelCase :Optional[float] = field( default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , ) _UpperCAmelCase :Optional[bool] = field( default=__snake_case , metadata={'help': 'If True, near-duplicate samples are removed.'} ) _UpperCAmelCase :Optional[float] = field( default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} ) @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} ) _UpperCAmelCase :Optional[str] = field( default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} ) _UpperCAmelCase :Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} ) _UpperCAmelCase :Optional[int] = field(default=2_0_0_0_0_0 , metadata={'help': 'Number of examples to train tokenizer on.'} ) _UpperCAmelCase :Optional[int] = field( default=3_2_7_6_8 , metadata={'help': 'Number of examples to train the tokenizer on.'} ) _UpperCAmelCase :Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} ) _UpperCAmelCase :Optional[bool] = field(default=__snake_case , metadata={'help': 'Push saved tokenizer to the hub.'} ) @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} ) _UpperCAmelCase :Optional[str] = field( default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} ) _UpperCAmelCase :Optional[int] = field(default=__snake_case , metadata={'help': 'Number of workers used for code evaluation.'} ) @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} ) _UpperCAmelCase :Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} ) _UpperCAmelCase :Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} ) _UpperCAmelCase :Optional[bool] = field(default=__snake_case , metadata={'help': 'Push saved tokenizer to the hub.'} )
38
from __future__ import annotations def A_ ( _lowerCAmelCase ) -> list[int]: UpperCamelCase : Optional[Any] = [True] * limit UpperCamelCase : Optional[Any] = False UpperCamelCase : List[str] = False UpperCamelCase : Tuple = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase : Optional[Any] = i * 2 while index < limit: UpperCamelCase : int = False UpperCamelCase : Optional[int] = index + i UpperCamelCase : Any = [2] for i in range(3 , _lowerCAmelCase , 2 ): if is_prime[i]: primes.append(_lowerCAmelCase ) return primes def A_ ( _lowerCAmelCase = 100_0000 ) -> int: UpperCamelCase : Union[str, Any] = prime_sieve(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = 0 for i in range(len(_lowerCAmelCase ) ): for j in range(i + length , len(_lowerCAmelCase ) ): UpperCamelCase : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase : int = j - i UpperCamelCase : Dict = sol return largest if __name__ == "__main__": print(f"""{solution() = }""")
38
1
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__ ( unittest.TestCase ): @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_uncond_unet UpperCamelCase : Any = PNDMScheduler() UpperCamelCase : int = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) UpperCamelCase : Tuple = torch.manual_seed(0 ) UpperCamelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type="numpy" ).images UpperCamelCase : Any = torch.manual_seed(0 ) UpperCamelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type="numpy" , return_dict=A_ )[0] UpperCamelCase : Any = image[0, -3:, -3:, -1] UpperCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase : Tuple = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "google/ddpm-cifar10-32" UpperCamelCase : Tuple = UNetaDModel.from_pretrained(A_ ) UpperCamelCase : str = PNDMScheduler() UpperCamelCase : Union[str, Any] = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) UpperCamelCase : Tuple = torch.manual_seed(0 ) UpperCamelCase : Tuple = pndm(generator=A_ , output_type="numpy" ).images UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase : int = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
38
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class A__ ( __snake_case ): def __init__( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ): '''simple docstring''' super().__init__( features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , ) UpperCamelCase : Optional[int] = Generator( cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , ) def __UpperCamelCase( self ): '''simple docstring''' if self.streaming: UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" ) # Build regular (map-style) dataset else: UpperCamelCase : Union[str, Any] = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : List[Any] = None UpperCamelCase : List[str] = None self.builder.download_and_prepare( download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , ) UpperCamelCase : int = self.builder.as_dataset( split="train" , verification_mode=A_ , in_memory=self.keep_in_memory ) return dataset
38
1
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = inspect.getfile(accelerate.test_utils ) UpperCamelCase : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCamelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""" ) UpperCamelCase : Optional[int] = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(A_ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""" ) UpperCamelCase : Dict = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(A_ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(A_ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) UpperCamelCase : Tuple = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(A_ , env=os.environ.copy() ) if __name__ == "__main__": __lowerCamelCase : Tuple = Accelerator() __lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10) __lowerCamelCase : Tuple = torch.randint(0, 10, shape).to(accelerator.device) __lowerCamelCase : str = """""" __lowerCamelCase : Union[str, Any] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __lowerCamelCase : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __lowerCamelCase : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
38
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def A_ ( _lowerCAmelCase ) -> Union[str, Any]: # picklable for multiprocessing return x.sum() def A_ ( _lowerCAmelCase ) -> Optional[Any]: # picklable for multiprocessing return i + 1 @dataclass class A__ : _UpperCAmelCase :int _UpperCAmelCase :str class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = {} UpperCamelCase : Optional[Any] = [] UpperCamelCase : List[Any] = 1 UpperCamelCase : Tuple = [1, 2] UpperCamelCase : Optional[Any] = {"a": 1, "b": 2} UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]} UpperCamelCase : Any = {"a": {"1": 1}, "b": 2} UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4} UpperCamelCase : Dict = {} UpperCamelCase : Any = [] UpperCamelCase : Any = 2 UpperCamelCase : Any = [2, 3] UpperCamelCase : Optional[Any] = {"a": 2, "b": 3} UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]} UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3} UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) UpperCamelCase : List[str] = 2 self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} UpperCamelCase : int = {"a": 2, "b": 0, "c": 2} UpperCamelCase : Union[str, Any] = { "a": np.eye(2 ).astype(A_ ), "b": np.zeros(3 ).astype(A_ ), "c": np.ones(2 ).astype(A_ ), } self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(A_ ): # can't pickle a local lambda map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = {"a": 1, "b": 2} UpperCamelCase : List[Any] = {"a": 3, "b": 4} UpperCamelCase : Tuple = {"a": 5, "b": 6} UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' class A__ : _UpperCAmelCase :str = 'bar' UpperCamelCase : List[Any] = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(A_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: UpperCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(_lowerCAmelCase )} UpperCamelCase : List[str] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A__ ( __snake_case ): @require_tf def __UpperCamelCase( self ): '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers UpperCamelCase : int = layers.Dense(2 ) def gen_random_output(): UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) ) return model(A_ ).numpy() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : List[Any] = gen_random_output() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : Dict = gen_random_output() UpperCamelCase : Optional[int] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch def gen_random_output(): UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 ) UpperCamelCase : Dict = torch.rand(1 , 3 ) return model(A_ ).detach().numpy() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Dict = gen_random_output() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Optional[int] = gen_random_output() UpperCamelCase : List[Any] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __UpperCamelCase( self ): '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): UpperCamelCase : Optional[Any] = gen_random_output() with temp_seed(42 ): UpperCamelCase : Optional[Any] = gen_random_output() UpperCamelCase : Optional[Any] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" , [{}] ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[Any] = NestedDataStructure(_lowerCAmelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" , [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: UpperCamelCase : Dict = NestedDataStructure(_lowerCAmelCase ).flatten() assert output == expected_output def A_ ( ) -> List[Any]: UpperCamelCase : str = A(x=1 , y="foobar" ) UpperCamelCase : Tuple = {"x": 1, "y": "foobar"} assert asdict(_lowerCAmelCase ) == expected_output UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]} UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(_lowerCAmelCase ) == expected_output with pytest.raises(_lowerCAmelCase ): asdict([1, A(x=10 , y="foo" )] ) def A_ ( _lowerCAmelCase ) -> Tuple: return text.split() def A_ ( _lowerCAmelCase ) -> Dict: yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def A_ ( ) -> str: with Pool(2 ) as pool: UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: UpperCamelCase : Any = [] for yield_time, content in iflatmap_unordered( _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_lowerCAmelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(_lowerCAmelCase ) == 4
38
1
from __future__ import annotations import requests __lowerCamelCase : Optional[int] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def A_ ( _lowerCAmelCase , _lowerCAmelCase = 1 , _lowerCAmelCase = "new" , _lowerCAmelCase = None ) -> dict: UpperCamelCase : Union[str, Any] = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(_lowerCAmelCase ) - valid_terms ) ): UpperCamelCase : Optional[int] = F"""Invalid search term: {invalid_search_terms}""" raise ValueError(_lowerCAmelCase ) UpperCamelCase : Dict = requests.get( F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError UpperCamelCase : str = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(_lowerCAmelCase )} UpperCamelCase : Optional[Any] = {} for id_ in range(_lowerCAmelCase ): UpperCamelCase : Tuple = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
38
from ..utils import DummyObject, requires_backends class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['note_seq'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["note_seq"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["note_seq"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["note_seq"] )
38
1
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : int = "" for word_or_phrase in separated: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise Exception("join() accepts only strings to be joined" ) joined += word_or_phrase + separator return joined.strip(_lowerCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
38
import math import tensorflow as tf from packaging import version def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : List[Any] = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : Any = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(math.pi , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.044_715 , x.dtype ) UpperCamelCase : int = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_lowerCAmelCase , 3 )) )) return x * cdf def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : str = tf.convert_to_tensor(_lowerCAmelCase ) return x * tf.tanh(tf.math.softplus(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Tuple = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(0.044_715 , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def A_ ( _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : Any = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def A_ ( _lowerCAmelCase ) -> List[Any]: return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 ) def A_ ( _lowerCAmelCase , _lowerCAmelCase=-1 ) -> str: UpperCamelCase , UpperCamelCase : List[Any] = tf.split(_lowerCAmelCase , 2 , axis=_lowerCAmelCase ) return a * tf.math.sigmoid(_lowerCAmelCase ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def A_ ( _lowerCAmelCase ) -> Any: return tf.keras.activations.gelu(_lowerCAmelCase , approximate=_lowerCAmelCase ) __lowerCamelCase : Optional[int] = tf.keras.activations.gelu __lowerCamelCase : int = approximate_gelu_wrap else: __lowerCamelCase : List[Any] = _gelu __lowerCamelCase : Optional[Any] = _gelu_new __lowerCamelCase : Any = { """gelu""": gelu, """gelu_10""": gelu_aa, """gelu_fast""": gelu_fast, """gelu_new""": gelu_new, """glu""": glu, """mish""": mish, """quick_gelu""": quick_gelu, """relu""": tf.keras.activations.relu, """sigmoid""": tf.keras.activations.sigmoid, """silu""": tf.keras.activations.swish, """swish""": tf.keras.activations.swish, """tanh""": tf.keras.activations.tanh, } def A_ ( _lowerCAmelCase ) -> Optional[Any]: if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
38
1
def A_ ( ) -> list[list[int]]: return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] __lowerCamelCase : Optional[Any] = generate_large_matrix() __lowerCamelCase : Dict = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A_ ( _lowerCAmelCase ) -> None: assert all(row == sorted(_lowerCAmelCase , reverse=_lowerCAmelCase ) for row in grid ) assert all(list(_lowerCAmelCase ) == sorted(_lowerCAmelCase , reverse=_lowerCAmelCase ) for col in zip(*_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : List[str] = 0 UpperCamelCase : Optional[Any] = len(_lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: UpperCamelCase : int = (left + right) // 2 UpperCamelCase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: UpperCamelCase : Optional[int] = mid + 1 else: UpperCamelCase : Union[str, Any] = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : List[Any] = 0 UpperCamelCase : Any = len(grid[0] ) for i in range(len(_lowerCAmelCase ) ): UpperCamelCase : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(_lowerCAmelCase ) * len(grid[0] )) - total def A_ ( _lowerCAmelCase ) -> int: return len([number for row in grid for number in row if number < 0] ) def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : List[str] = 0 for row in grid: for i, number in enumerate(_lowerCAmelCase ): if number < 0: total += len(_lowerCAmelCase ) - i break return total def A_ ( ) -> None: from timeit import timeit print("Running benchmarks" ) UpperCamelCase : Dict = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): UpperCamelCase : List[str] = timeit(F"""{func}(grid=grid)""" , setup=_lowerCAmelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
38
import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :str = KandinskyVaaPipeline _UpperCAmelCase :str = [ 'image_embeds', 'negative_image_embeds', ] _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds'] _UpperCAmelCase :List[str] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :List[str] = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : Dict = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Dict = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( A_ ) if str(A_ ).startswith("mps" ): UpperCamelCase : Optional[Any] = torch.manual_seed(A_ ) else: UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Optional[int] = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "cpu" UpperCamelCase : List[str] = self.get_dummy_components() UpperCamelCase : Tuple = self.pipeline_class(**A_ ) UpperCamelCase : List[str] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : Optional[int] = output.images UpperCamelCase : int = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : Tuple = image[0, -3:, -3:, -1] UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : int = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) UpperCamelCase : Tuple = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : str = "red cat, 4k photo" UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Tuple = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase : Tuple = pipeline( image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A_ , A_ )
38
1
def A_ ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool: UpperCamelCase : Optional[int] = set() # Replace all the whitespace in our sentence UpperCamelCase : Union[str, Any] = input_str.replace(" " , "" ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(_lowerCAmelCase ) == 26 def A_ ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool: UpperCamelCase : int = [False] * 26 for char in input_str: if char.islower(): UpperCamelCase : Tuple = True elif char.isupper(): UpperCamelCase : str = True return all(_lowerCAmelCase ) def A_ ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool: return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def A_ ( ) -> None: from timeit import timeit UpperCamelCase : Any = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" print(timeit("is_pangram()" , setup=_lowerCAmelCase ) ) print(timeit("is_pangram_faster()" , setup=_lowerCAmelCase ) ) print(timeit("is_pangram_fastest()" , setup=_lowerCAmelCase ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
38
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A_ ( ) -> Dict: UpperCamelCase : Tuple = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=_lowerCAmelCase , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=_lowerCAmelCase ) return parser.parse_args() def A_ ( ) -> Optional[int]: UpperCamelCase : Tuple = parse_args() # Import training_script as a module. UpperCamelCase : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCamelCase : List[Any] = script_fpath.stem UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase ) # Patch sys.argv UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
38
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class A__ ( unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ViTImageProcessor if is_vision_available() else None @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = (3, 32, 128) UpperCamelCase : Tuple = tempfile.mkdtemp() # fmt: off UpperCamelCase : List[str] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on UpperCamelCase : Tuple = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(A_ ) + "\n" ) UpperCamelCase : Any = { "do_normalize": False, "do_resize": True, "image_processor_type": "ViTImageProcessor", "resample": 3, "size": {"height": 32, "width": 128}, } UpperCamelCase : int = os.path.join(self.tmpdirname , A_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(A_ , A_ ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A_ ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ ) def __UpperCamelCase( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) UpperCamelCase : Union[str, Any] = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) return image_input def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.get_tokenizer() UpperCamelCase : List[Any] = self.get_image_processor() UpperCamelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.get_tokenizer() UpperCamelCase : str = self.get_image_processor() UpperCamelCase : Dict = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCamelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) UpperCamelCase : Union[str, Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.get_image_processor() UpperCamelCase : Dict = self.get_tokenizer() UpperCamelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : List[str] = self.prepare_image_inputs() UpperCamelCase : List[str] = image_processor(A_ , return_tensors="np" ) UpperCamelCase : Any = processor(images=A_ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.get_image_processor() UpperCamelCase : Union[str, Any] = self.get_tokenizer() UpperCamelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Tuple = "test" UpperCamelCase : Optional[int] = processor(text=A_ ) UpperCamelCase : Dict = tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.get_image_processor() UpperCamelCase : List[Any] = self.get_tokenizer() UpperCamelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Tuple = "test" UpperCamelCase : Dict = self.prepare_image_inputs() UpperCamelCase : Tuple = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.get_image_processor() UpperCamelCase : Tuple = self.get_tokenizer() UpperCamelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase : Dict = processor.char_decode(A_ ) UpperCamelCase : int = tokenizer.batch_decode(A_ ) UpperCamelCase : str = [seq.replace(" " , "" ) for seq in decoded_tok] self.assertListEqual(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.get_image_processor() UpperCamelCase : Optional[int] = self.get_tokenizer() UpperCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Dict = None UpperCamelCase : Any = self.prepare_image_inputs() UpperCamelCase : List[Any] = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.get_image_processor() UpperCamelCase : Optional[int] = self.get_tokenizer() UpperCamelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : int = torch.randn(1 , 27 , 38 ) UpperCamelCase : Tuple = torch.randn(1 , 27 , 5_0257 ) UpperCamelCase : Union[str, Any] = torch.randn(1 , 27 , 3_0522 ) UpperCamelCase : List[Any] = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Union[str, Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class A__ : def __init__( self , A_ , A_=None , A_=None , A_=None , A_="resnet50" , A_=3 , A_=32 , A_=3 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : str = parent UpperCamelCase : Any = out_indices if out_indices is not None else [4] UpperCamelCase : List[Any] = stage_names UpperCamelCase : Dict = out_features UpperCamelCase : Union[str, Any] = backbone UpperCamelCase : int = batch_size UpperCamelCase : List[str] = image_size UpperCamelCase : Tuple = num_channels UpperCamelCase : List[Any] = use_pretrained_backbone UpperCamelCase : str = is_training def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[Any] = self.get_config() return config, pixel_values def __UpperCamelCase( self ): '''simple docstring''' return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = TimmBackbone(config=A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase : Optional[int] = model(A_ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase : Union[str, Any] = config_and_inputs UpperCamelCase : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :List[Any] = (TimmBackbone,) if is_torch_available() else () _UpperCAmelCase :Optional[int] = {'feature-extraction': TimmBackbone} if is_torch_available() else {} _UpperCAmelCase :List[Any] = False _UpperCAmelCase :List[str] = False _UpperCAmelCase :str = False _UpperCAmelCase :Optional[int] = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = TimmBackboneModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , has_text_modality=A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = "resnet18" UpperCamelCase : Optional[Any] = "microsoft/resnet-18" UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(A_ , use_timm_backbone=A_ ) UpperCamelCase : Union[str, Any] = AutoBackbone.from_pretrained(A_ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) UpperCamelCase : int = AutoBackbone.from_pretrained(A_ , use_timm_backbone=A_ , out_indices=[1, 2, 3] ) UpperCamelCase : Dict = AutoBackbone.from_pretrained(A_ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("TimmBackbone doesn't support feed forward chunking" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone initialization is managed on the timm side" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("TimmBackbone doesn't support output_attentions." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Safetensors is not supported by timm." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Optional[Any] = model_class(A_ ) UpperCamelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : int = [*signature.parameters.keys()] UpperCamelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Union[str, Any] = True UpperCamelCase : str = self.has_attentions # no need to test all models as different heads yield the same functionality UpperCamelCase : str = self.all_model_classes[0] UpperCamelCase : Optional[Any] = model_class(A_ ) model.to(A_ ) UpperCamelCase : Tuple = self._prepare_for_class(A_ , A_ ) UpperCamelCase : Any = model(**A_ ) UpperCamelCase : Tuple = outputs[0][-1] # Encoder-/Decoder-only models UpperCamelCase : Optional[Any] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: UpperCamelCase : List[str] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=A_ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = model_class(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Any = model(**A_ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None UpperCamelCase : Any = copy.deepcopy(A_ ) UpperCamelCase : Optional[int] = None UpperCamelCase : Optional[int] = model_class(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = model(**A_ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights UpperCamelCase : Optional[int] = copy.deepcopy(A_ ) UpperCamelCase : Dict = False UpperCamelCase : int = model_class(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Tuple = model(**A_ )
38
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18} UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18} UpperCamelCase : Optional[Any] = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : List[Any] = num_channels UpperCamelCase : Union[str, Any] = num_frames UpperCamelCase : Any = image_size UpperCamelCase : Tuple = min_resolution UpperCamelCase : Optional[Any] = max_resolution UpperCamelCase : Any = do_resize UpperCamelCase : Tuple = size UpperCamelCase : List[Any] = do_normalize UpperCamelCase : Optional[int] = image_mean UpperCamelCase : Any = image_std UpperCamelCase : str = crop_size def __UpperCamelCase( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = VivitImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
38
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = { """microsoft/swinv2-tiny-patch4-window8-256""": ( """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :List[Any] = 'swinv2' _UpperCAmelCase :Tuple = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , A_=224 , A_=4 , A_=3 , A_=96 , A_=[2, 2, 6, 2] , A_=[3, 6, 12, 24] , A_=7 , A_=4.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=0.02 , A_=1e-5 , A_=32 , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : List[Any] = image_size UpperCamelCase : List[str] = patch_size UpperCamelCase : str = num_channels UpperCamelCase : str = embed_dim UpperCamelCase : Dict = depths UpperCamelCase : List[Any] = len(A_ ) UpperCamelCase : Optional[Any] = num_heads UpperCamelCase : List[Any] = window_size UpperCamelCase : Any = mlp_ratio UpperCamelCase : List[Any] = qkv_bias UpperCamelCase : int = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : Tuple = drop_path_rate UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : List[Any] = use_absolute_embeddings UpperCamelCase : int = layer_norm_eps UpperCamelCase : Any = initializer_range UpperCamelCase : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase : Union[str, Any] = int(embed_dim * 2 ** (len(A_ ) - 1) ) UpperCamelCase : Optional[Any] = (0, 0, 0, 0)
38
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __lowerCamelCase : Dict = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __lowerCamelCase : Tuple = { """facebook/blenderbot_small-90M""": 512, } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , ) UpperCamelCase : Union[str, Any] = add_prefix_space def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Tuple = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
38
1
from torch import nn def A_ ( _lowerCAmelCase ) -> List[Any]: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : int = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
# Algorithm for the pigeonhole sorting def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : List[str] = min(_lowerCAmelCase ) # min() finds the minimum value UpperCamelCase : List[Any] = max(_lowerCAmelCase ) # max() finds the maximum value UpperCamelCase : Union[str, Any] = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size UpperCamelCase : List[Any] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. UpperCamelCase : Union[str, Any] = 0 for count in range(_lowerCAmelCase ): while holes[count] > 0: holes[count] -= 1 UpperCamelCase : Dict = count + min_val i += 1 def A_ ( ) -> Optional[Any]: UpperCamelCase : Optional[Any] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(_lowerCAmelCase ) print("Sorted order is:" , " ".join(_lowerCAmelCase ) ) if __name__ == "__main__": main()
38
import logging import os import threading import time try: import warnings except ImportError: __lowerCamelCase : str = None try: import msvcrt except ImportError: __lowerCamelCase : str = None try: import fcntl except ImportError: __lowerCamelCase : List[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __lowerCamelCase : Union[str, Any] = OSError # Data # ------------------------------------------------ __lowerCamelCase : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] __lowerCamelCase : Union[str, Any] = """3.0.12""" __lowerCamelCase : Any = None def A_ ( ) -> List[Any]: global _logger UpperCamelCase : Any = _logger or logging.getLogger(__name__ ) return _logger class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = lock_file return None def __str__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.lock.release() return None class A__ : def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ ) # The path to the lock file. UpperCamelCase : List[Any] = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase : Tuple = None # The default timeout value. UpperCamelCase : Optional[Any] = timeout # We use this lock primarily for the lock counter. UpperCamelCase : Union[str, Any] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase : Dict = 0 return None @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file @property def __UpperCamelCase( self ): '''simple docstring''' return self._timeout @timeout.setter def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = float(A_ ) return None def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file_fd is not None def __UpperCamelCase( self , A_=None , A_=0.05 ): '''simple docstring''' if timeout is None: UpperCamelCase : Optional[Any] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 UpperCamelCase : Dict = id(self ) UpperCamelCase : List[str] = self._lock_file UpperCamelCase : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(A_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase( self , A_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase : List[Any] = id(self ) UpperCamelCase : Dict = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() UpperCamelCase : Dict = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=A_ ) return None def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = os.path.basename(A_ ) if len(A_ ) > max_length and max_length > 0: UpperCamelCase : Optional[int] = os.path.dirname(A_ ) UpperCamelCase : int = str(hash(A_ ) ) UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(A_ , A_ ) else: return path class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase : str = os.open(self._lock_file , A_ ) except OSError: pass else: try: msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(A_ ) else: UpperCamelCase : Optional[Any] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self._lock_file_fd UpperCamelCase : str = None msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 ) os.close(A_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. except OSError: pass return None class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase : int = os.open(self._lock_file , A_ ) try: fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(A_ ) else: UpperCamelCase : List[str] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self._lock_file_fd UpperCamelCase : List[Any] = None fcntl.flock(A_ , fcntl.LOCK_UN ) os.close(A_ ) return None class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase : Optional[int] = os.open(self._lock_file , A_ ) except OSError: pass else: UpperCamelCase : Tuple = fd return None def __UpperCamelCase( self ): '''simple docstring''' os.close(self._lock_file_fd ) UpperCamelCase : str = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __lowerCamelCase : Dict = None if msvcrt: __lowerCamelCase : Any = WindowsFileLock elif fcntl: __lowerCamelCase : Any = UnixFileLock else: __lowerCamelCase : int = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
38
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 255 , A_=True , ): '''simple docstring''' UpperCamelCase : Dict = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} UpperCamelCase : str = parent UpperCamelCase : Optional[Any] = batch_size UpperCamelCase : Any = num_channels UpperCamelCase : str = min_resolution UpperCamelCase : Union[str, Any] = max_resolution UpperCamelCase : str = do_resize UpperCamelCase : List[str] = size UpperCamelCase : List[Any] = do_normalize UpperCamelCase : int = image_mean UpperCamelCase : Optional[Any] = image_std UpperCamelCase : str = do_rescale UpperCamelCase : Dict = rescale_factor UpperCamelCase : Union[str, Any] = do_pad def __UpperCamelCase( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCamelCase( self , A_ , A_=False ): '''simple docstring''' if not batched: UpperCamelCase : Tuple = image_inputs[0] if isinstance(A_ , Image.Image ): UpperCamelCase , UpperCamelCase : Union[str, Any] = image.size else: UpperCamelCase , UpperCamelCase : Optional[Any] = image.shape[1], image.shape[2] if w < h: UpperCamelCase : Dict = int(self.size["shortest_edge"] * h / w ) UpperCamelCase : List[str] = self.size["shortest_edge"] elif w > h: UpperCamelCase : List[str] = self.size["shortest_edge"] UpperCamelCase : Optional[Any] = int(self.size["shortest_edge"] * w / h ) else: UpperCamelCase : Any = self.size["shortest_edge"] UpperCamelCase : str = self.size["shortest_edge"] else: UpperCamelCase : Optional[int] = [] for image in image_inputs: UpperCamelCase , UpperCamelCase : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase : Tuple = max(A_ , key=lambda A_ : item[0] )[0] UpperCamelCase : List[str] = max(A_ , key=lambda A_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = DetaImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = DetaImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_rescale" ) ) self.assertTrue(hasattr(A_ , "do_pad" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , A_ ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase , UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) UpperCamelCase : Optional[int] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Any = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase : List[str] = image_processing(A_ , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase : int = image_processing(A_ , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : str = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: UpperCamelCase : Union[str, Any] = json.loads(f.read() ) UpperCamelCase : Optional[int] = {"image_id": 3_9769, "annotations": target} # encode them UpperCamelCase : Dict = DetaImageProcessor() UpperCamelCase : List[str] = image_processing(images=A_ , annotations=A_ , return_tensors="pt" ) # verify pixel values UpperCamelCase : Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , A_ ) UpperCamelCase : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase : Optional[int] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) ) # verify boxes UpperCamelCase : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ ) UpperCamelCase : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase : Any = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) ) # verify is_crowd UpperCamelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) ) # verify class_labels UpperCamelCase : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) ) # verify orig_size UpperCamelCase : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) ) # verify size UpperCamelCase : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: UpperCamelCase : Optional[int] = json.loads(f.read() ) UpperCamelCase : int = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target} UpperCamelCase : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them UpperCamelCase : Dict = DetaImageProcessor(format="coco_panoptic" ) UpperCamelCase : Dict = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="pt" ) # verify pixel values UpperCamelCase : List[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , A_ ) UpperCamelCase : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase : List[str] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) ) # verify boxes UpperCamelCase : int = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ ) UpperCamelCase : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase : List[Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) ) # verify is_crowd UpperCamelCase : int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) ) # verify class_labels UpperCamelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) ) # verify masks UpperCamelCase : Optional[int] = 82_2873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A_ ) # verify orig_size UpperCamelCase : Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) ) # verify size UpperCamelCase : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
38
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> str: if config_name_or_path is None: UpperCamelCase : Dict = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: UpperCamelCase : Tuple = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: UpperCamelCase : Tuple = question_encoder_name_or_path UpperCamelCase : Any = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. UpperCamelCase : Optional[Any] = RagConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Tuple = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : int = gen_config UpperCamelCase : Dict = question_encoder_config UpperCamelCase : Tuple = model_class.from_pretrained_question_encoder_generator( _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase ) rag_model.save_pretrained(_lowerCAmelCase ) # Sanity check. model_class.from_pretrained(_lowerCAmelCase ) # Save tokenizers. UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) __lowerCamelCase : Dict = parser.parse_args() __lowerCamelCase : Dict = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
38
1
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return int(input_a == input_a == 0 ) def A_ ( ) -> None: print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : Dict = parent UpperCamelCase : str = 13 UpperCamelCase : int = 7 UpperCamelCase : str = True UpperCamelCase : Dict = True UpperCamelCase : str = True UpperCamelCase : Tuple = True UpperCamelCase : List[str] = 99 UpperCamelCase : Optional[Any] = 384 UpperCamelCase : Tuple = 2 UpperCamelCase : Union[str, Any] = 4 UpperCamelCase : Dict = 37 UpperCamelCase : Any = "gelu" UpperCamelCase : List[Any] = 0.1 UpperCamelCase : int = 0.1 UpperCamelCase : Tuple = 512 UpperCamelCase : List[Any] = 16 UpperCamelCase : int = 2 UpperCamelCase : Dict = 0.02 UpperCamelCase : Optional[Any] = 3 UpperCamelCase : List[Any] = 4 UpperCamelCase : Dict = 128 UpperCamelCase : Optional[Any] = 2 UpperCamelCase : Optional[int] = 9 UpperCamelCase : Optional[int] = 1 UpperCamelCase : Union[str, Any] = None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : str = None if self.use_input_mask: UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None if self.use_token_type_ids: UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Optional[int] = None UpperCamelCase : Optional[int] = None UpperCamelCase : List[Any] = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Any = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel(config=A_ ) UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Optional[int] = [input_ids, input_mask] UpperCamelCase : Any = model(A_ ) UpperCamelCase : int = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ ) UpperCamelCase : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = self.num_choices UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ ) UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ ) UpperCamelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Dict = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase :Any = False _UpperCAmelCase :int = False _UpperCAmelCase :str = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = TFConvBertModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Optional[Any] = True UpperCamelCase : Any = True if hasattr(A_ , "use_cache" ): UpperCamelCase : List[str] = True UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ ) for model_class in self.all_model_classes: UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ ) UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Optional[int] = len(model(A_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_ ) UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" ) UpperCamelCase : Dict = tf.keras.models.load_model(A_ ) UpperCamelCase : str = model(A_ ) if self.is_encoder_decoder: UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"] UpperCamelCase : Any = outputs["encoder_attentions"] else: UpperCamelCase : Any = outputs["hidden_states"] UpperCamelCase : List[str] = outputs["attentions"] self.assertEqual(len(A_ ) , A_ ) UpperCamelCase : int = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A_ ) , A_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Dict = True UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ ) UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ ) def check_decoder_attentions_output(A_ ): UpperCamelCase : Optional[Any] = len(A_ ) self.assertEqual(out_len % 2 , 0 ) UpperCamelCase : Any = outputs.decoder_attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(A_ ): UpperCamelCase : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True UpperCamelCase : List[Any] = False UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) UpperCamelCase : List[str] = len(A_ ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) if self.is_encoder_decoder: UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_decoder_attentions_output(A_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCamelCase : Tuple = True UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) # Check attention is always last and order is fine UpperCamelCase : Optional[int] = True UpperCamelCase : List[str] = True UpperCamelCase : Optional[int] = model_class(A_ ) UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) ) self.assertEqual(model.config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) @require_tf class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase : List[str] = model(A_ )[0] UpperCamelCase : int = [1, 6, 768] self.assertEqual(output.shape , A_ ) UpperCamelCase : List[str] = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
38
1
def A_ ( _lowerCAmelCase = 10 ) -> str: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or n < 0: raise ValueError("Invalid input" ) UpperCamelCase : List[Any] = 10**n UpperCamelCase : List[Any] = 2_8433 * (pow(2 , 783_0457 , _lowerCAmelCase )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f"""{solution(10) = }""")
38
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : str = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'camembert' def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : List[str] = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : str = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = use_cache UpperCamelCase : List[str] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
38
1
from math import sqrt def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : Optional[int] = 0 for i in range(1 , int(sqrt(_lowerCAmelCase ) + 1 ) ): if n % i == 0 and i != sqrt(_lowerCAmelCase ): total += i + n // i elif i == sqrt(_lowerCAmelCase ): total += i return total - n def A_ ( _lowerCAmelCase = 1_0000 ) -> int: UpperCamelCase : Dict = sum( i for i in range(1 , _lowerCAmelCase ) if sum_of_divisors(sum_of_divisors(_lowerCAmelCase ) ) == i and sum_of_divisors(_lowerCAmelCase ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
38
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return int(input_a == input_a == 0 ) def A_ ( ) -> None: print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase : Any = 16 __lowerCamelCase : str = 32 def A_ ( _lowerCAmelCase , _lowerCAmelCase = 16 ) -> Any: UpperCamelCase : int = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCamelCase : Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase : Optional[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase : Any = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase : Dict = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase : Dict = 16 elif accelerator.mixed_precision != "no": UpperCamelCase : List[Any] = 8 else: UpperCamelCase : Any = None return tokenizer.pad( _lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. UpperCamelCase : Union[str, Any] = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCamelCase : str = mocked_dataloaders # noqa: F811 def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1": UpperCamelCase : List[Any] = 2 # Initialize accelerator UpperCamelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase : Tuple = config["lr"] UpperCamelCase : Optional[int] = int(config["num_epochs"] ) UpperCamelCase : Any = int(config["seed"] ) UpperCamelCase : Dict = int(config["batch_size"] ) UpperCamelCase : Tuple = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation UpperCamelCase : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCamelCase : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE UpperCamelCase : int = MAX_GPU_BATCH_SIZE set_seed(_lowerCAmelCase ) UpperCamelCase , UpperCamelCase : Optional[Any] = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase : Tuple = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase : List[str] = AdamW(params=model.parameters() , lr=_lowerCAmelCase ) # Instantiate scheduler UpperCamelCase : Any = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Now we train the model for epoch in range(_lowerCAmelCase ): model.train() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCamelCase : Any = model(**_lowerCAmelCase ) UpperCamelCase : Optional[Any] = outputs.loss UpperCamelCase : List[Any] = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() UpperCamelCase : List[str] = 0 for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase : str = model(**_lowerCAmelCase ) UpperCamelCase : List[str] = outputs.logits.argmax(dim=-1 ) UpperCamelCase , UpperCamelCase : Union[str, Any] = accelerator.gather((predictions, batch["labels"]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(_lowerCAmelCase ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples UpperCamelCase : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCamelCase : Any = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=_lowerCAmelCase , references=_lowerCAmelCase , ) UpperCamelCase : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _lowerCAmelCase ) def A_ ( ) -> Any: UpperCamelCase : Any = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) UpperCamelCase : Dict = parser.parse_args() UpperCamelCase : Any = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
38
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer'] _UpperCAmelCase :Tuple = 'BlipImageProcessor' _UpperCAmelCase :Optional[int] = 'AutoTokenizer' def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = False super().__init__(A_ , A_ ) UpperCamelCase : str = self.image_processor def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ): '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: UpperCamelCase : int = self.tokenizer UpperCamelCase : Optional[int] = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) return text_encoding # add pixel_values UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ ) if text is not None: UpperCamelCase : Dict = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) else: UpperCamelCase : Dict = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.batch_decode(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.decode(*A_ , **A_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.tokenizer.model_input_names UpperCamelCase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
38
1
import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor __lowerCamelCase : Dict = logging.get_logger(__name__) class A__ ( __snake_case ): def __init__( self , *A_ , **A_ ): '''simple docstring''' warnings.warn( "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DonutImageProcessor instead." , A_ , ) super().__init__(*A_ , **A_ )
38
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCamelCase : Dict = logging.get_logger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask'] def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ): '''simple docstring''' super().__init__( feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , ) UpperCamelCase : Optional[int] = spectrogram_length UpperCamelCase : Dict = num_channels UpperCamelCase : Optional[Any] = patch_size UpperCamelCase : str = feature_size // self.patch_size[1] UpperCamelCase : List[str] = n_fft UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate UpperCamelCase : Optional[int] = sampling_rate UpperCamelCase : int = padding_value UpperCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = spectrogram( A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , ) UpperCamelCase : List[Any] = log_spec[:, :-1] UpperCamelCase : Optional[int] = log_spec - 20.0 UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) UpperCamelCase : Union[str, Any] = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase : List[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCamelCase : str = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A_ ): UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCamelCase : List[str] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCamelCase : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa ) # convert into correct format for padding UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCamelCase : List[str] = padded_audio_features * self.padding_value for i in range(len(A_ ) ): UpperCamelCase : Union[str, Any] = audio_features[i] UpperCamelCase : Optional[int] = feature # return as BatchFeature if return_attention_mask: UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: UpperCamelCase : int = {"audio_values": padded_audio_features} UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ ) return encoded_inputs
38
1
import random def A_ ( _lowerCAmelCase ) -> bool: UpperCamelCase : List[Any] = num - 1 UpperCamelCase : Union[str, Any] = 0 while s % 2 == 0: UpperCamelCase : List[Any] = s // 2 t += 1 for _ in range(5 ): UpperCamelCase : List[str] = random.randrange(2 , num - 1 ) UpperCamelCase : Optional[int] = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if v != 1: UpperCamelCase : str = 0 while v != (num - 1): if i == t - 1: return False else: UpperCamelCase : Any = i + 1 UpperCamelCase : List[str] = (v**2) % num return True def A_ ( _lowerCAmelCase ) -> bool: if num < 2: return False UpperCamelCase : str = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(_lowerCAmelCase ) def A_ ( _lowerCAmelCase = 1024 ) -> int: while True: UpperCamelCase : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(_lowerCAmelCase ): return num if __name__ == "__main__": __lowerCamelCase : List[Any] = generate_large_prime() print(("""Prime number:""", num)) print(("""is_prime_low_num:""", is_prime_low_num(num)))
38
from __future__ import annotations from random import random from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar("""KT""") __lowerCamelCase : Dict = TypeVar("""VT""") class A__ ( Generic[KT, VT] ): def __init__( self , A_ = "root" , A_ = None ): '''simple docstring''' UpperCamelCase : int = key UpperCamelCase : List[Any] = value UpperCamelCase : list[Node[KT, VT]] = [] def __repr__( self ): '''simple docstring''' return F"""Node({self.key}: {self.value})""" @property def __UpperCamelCase( self ): '''simple docstring''' return len(self.forward ) class A__ ( Generic[KT, VT] ): def __init__( self , A_ = 0.5 , A_ = 16 ): '''simple docstring''' UpperCamelCase : Node[KT, VT] = Node[KT, VT]() UpperCamelCase : List[Any] = 0 UpperCamelCase : Union[str, Any] = p UpperCamelCase : List[str] = max_level def __str__( self ): '''simple docstring''' UpperCamelCase : int = list(self ) if len(A_ ) == 0: return F"""SkipList(level={self.level})""" UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 ) UpperCamelCase : Dict = max(A_ , 4 ) + 4 UpperCamelCase : str = self.head UpperCamelCase : List[Any] = [] UpperCamelCase : int = node.forward.copy() lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) ) lines.append(" " * label_size + "| " * len(A_ ) ) while len(node.forward ) != 0: UpperCamelCase : Union[str, Any] = node.forward[0] lines.append( F"""[{node.key}]""".ljust(A_ , "-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(A_ ) ) UpperCamelCase : Tuple = node.forward lines.append("None".ljust(A_ ) + "* " * len(A_ ) ) return F"""SkipList(level={self.level})\n""" + "\n".join(A_ ) def __iter__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.head while len(node.forward ) != 0: yield node.forward[0].key UpperCamelCase : Union[str, Any] = node.forward[0] def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = 1 while random() < self.p and level < self.max_level: level += 1 return level def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = [] UpperCamelCase : List[Any] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: UpperCamelCase : str = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(A_ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ ) if node is not None: for i, update_node in enumerate(A_ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: UpperCamelCase : Tuple = node.forward[i] else: UpperCamelCase : List[Any] = update_node.forward[:i] def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ ) if node is not None: UpperCamelCase : Union[str, Any] = value else: UpperCamelCase : Dict = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , A_ ): update_vector.append(self.head ) UpperCamelCase : Optional[int] = level UpperCamelCase : Dict = Node(A_ , A_ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(A_ ) else: UpperCamelCase : List[Any] = new_node def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ ) if node is not None: return node.value return None def A_ ( ) -> List[Any]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 12 ) skip_list.insert("Key3" , 41 ) skip_list.insert("Key4" , -19 ) UpperCamelCase : Optional[int] = skip_list.head UpperCamelCase : List[str] = {} while node.level != 0: UpperCamelCase : str = node.forward[0] UpperCamelCase : Optional[int] = node.value assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def A_ ( ) -> List[Any]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key1" , 10 ) skip_list.insert("Key1" , 12 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 10 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) skip_list.insert("Key10" , 10 ) UpperCamelCase : Dict = skip_list.head UpperCamelCase : Tuple = {} while node.level != 0: UpperCamelCase : List[str] = node.forward[0] UpperCamelCase : Dict = node.value if len(_lowerCAmelCase ) != 4: print() assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() assert skip_list.find("Some key" ) is None def A_ ( ) -> Tuple: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key2" , 20 ) assert skip_list.find("Key2" ) == 20 skip_list.insert("Some Key" , 10 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 13 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 10 assert skip_list.find("V" ) == 13 def A_ ( ) -> Dict: UpperCamelCase : Optional[int] = SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def A_ ( ) -> Dict: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[str]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 14 assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("Key1" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 15 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 142 ) skip_list.insert("Key2" , 15 ) skip_list.delete("X" ) def traverse_keys(_lowerCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_lowerCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def A_ ( ) -> Union[str, Any]: def is_sorted(_lowerCAmelCase ): return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) ) UpperCamelCase : int = SkipList() for i in range(10 ): skip_list.insert(_lowerCAmelCase , _lowerCAmelCase ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_lowerCAmelCase ) ) def A_ ( ) -> Tuple: for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[int] = parent UpperCamelCase : Any = batch_size UpperCamelCase : List[str] = seq_length UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : int = use_input_mask UpperCamelCase : int = use_token_type_ids UpperCamelCase : Any = use_labels UpperCamelCase : Optional[Any] = vocab_size UpperCamelCase : List[Any] = hidden_size UpperCamelCase : Optional[int] = num_hidden_layers UpperCamelCase : Optional[int] = num_attention_heads UpperCamelCase : List[str] = intermediate_size UpperCamelCase : List[str] = hidden_act UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Optional[int] = attention_probs_dropout_prob UpperCamelCase : int = max_position_embeddings UpperCamelCase : Dict = type_vocab_size UpperCamelCase : Optional[Any] = type_sequence_label_size UpperCamelCase : List[str] = initializer_range UpperCamelCase : Optional[int] = num_labels UpperCamelCase : List[str] = num_choices UpperCamelCase : List[Any] = scope UpperCamelCase : Optional[Any] = vocab_size - 1 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : List[str] = None if self.use_input_mask: UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : int = None if self.use_labels: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Tuple = self.get_config() return config, input_ids, input_mask, token_labels def __UpperCamelCase( self ): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = self.prepare_config_and_inputs() UpperCamelCase : Dict = True return config, input_ids, input_mask, token_labels def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = GPTNeoXModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Any = model(A_ , attention_mask=A_ ) UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = True UpperCamelCase : Optional[Any] = GPTNeoXModel(A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[Any] = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = GPTNeoXForCausalLM(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : int = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.num_labels UpperCamelCase : Union[str, Any] = GPTNeoXForQuestionAnswering(A_ ) model.to(A_ ) model.eval() UpperCamelCase : int = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : List[str] = GPTNeoXForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = self.num_labels UpperCamelCase : Optional[int] = GPTNeoXForTokenClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = True UpperCamelCase : Tuple = GPTNeoXForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass UpperCamelCase : str = model(A_ , attention_mask=A_ , use_cache=A_ ) UpperCamelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase : Optional[Any] = model(A_ , attention_mask=A_ , output_hidden_states=A_ ) UpperCamelCase : Any = output_from_no_past["hidden_states"][0] UpperCamelCase : List[str] = model( A_ , attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )["hidden_states"][0] # select random slice UpperCamelCase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = config_and_inputs UpperCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :str = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :str = (GPTNeoXForCausalLM,) if is_torch_available() else () _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Any = False _UpperCAmelCase :List[str] = False _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Dict = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = GPTNeoXModelTester(self ) UpperCamelCase : Any = ConfigTester(self , config_class=A_ , hidden_size=64 , num_attention_heads=8 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase : Optional[int] = None self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @unittest.skip(reason="Feed forward chunking is not implemented" ) def __UpperCamelCase( self ): '''simple docstring''' pass @parameterized.expand([("linear",), ("dynamic",)] ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Tuple = ids_tensor([1, 10] , config.vocab_size ) UpperCamelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase : Dict = GPTNeoXModel(A_ ) original_model.to(A_ ) original_model.eval() UpperCamelCase : List[Any] = original_model(A_ ).last_hidden_state UpperCamelCase : Optional[Any] = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase : Union[str, Any] = {"type": scaling_type, "factor": 10.0} UpperCamelCase : List[Any] = GPTNeoXModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() UpperCamelCase : Dict = scaled_model(A_ ).last_hidden_state UpperCamelCase : Any = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" ) for checkpointing in [True, False]: UpperCamelCase : str = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A_ ) UpperCamelCase : Optional[Any] = tokenizer("My favorite food is" , return_tensors="pt" ).to(A_ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCamelCase : Optional[int] = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure" UpperCamelCase : List[Any] = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 ) UpperCamelCase : int = tokenizer.batch_decode(A_ )[0] self.assertEqual(A_ , A_ )
38
from PIL import Image def A_ ( _lowerCAmelCase ) -> Image: UpperCamelCase , UpperCamelCase : List[Any] = image.size UpperCamelCase : Union[str, Any] = 0 UpperCamelCase : List[str] = image.load() for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): UpperCamelCase : List[Any] = pixels[j, i] mean += pixel mean //= width * height for j in range(_lowerCAmelCase ): for i in range(_lowerCAmelCase ): UpperCamelCase : Union[str, Any] = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
38
1
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def A_ ( ) -> Dict: UpperCamelCase : Any = torch.nn.Linear(2 , 4 ) UpperCamelCase : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) UpperCamelCase : Optional[int] = torch.optim.lr_scheduler.OneCycleLR(_lowerCAmelCase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) UpperCamelCase : str = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) UpperCamelCase : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def A_ ( _lowerCAmelCase ) -> Tuple: return (model.weight.abs().sum() + model.bias.abs().sum()).item() def A_ ( _lowerCAmelCase ) -> Optional[int]: UpperCamelCase : List[Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_lowerCAmelCase ) class A__ ( __snake_case ): @require_cuda def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(A_ ): UpperCamelCase : Union[str, Any] = Accelerator(cpu=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = Accelerator() UpperCamelCase : Dict = GradientState() assert state.num_steps == 1 UpperCamelCase : Tuple = 4 assert state.num_steps == 4 assert state.sync_gradients is True UpperCamelCase : Dict = False assert state.sync_gradients is False GradientState._reset_state() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = create_components() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Dict = accelerator.prepare(A_ , A_ , A_ , A_ , A_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = create_components() accelerator.prepare(A_ , A_ , A_ , A_ , A_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def __UpperCamelCase( self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*A_ , **A_ ): pass with patch("torch.cuda.set_device" , A_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): UpperCamelCase : Dict = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = create_components() accelerator.prepare(A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Any = get_signature(A_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(A_ ) # make sure random weights don't match load_random_weights(A_ ) self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(A_ ) self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = create_components() accelerator.prepare(A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Any = get_signature(A_ ) # saving hook def save_config(A_ , A_ , A_ ): UpperCamelCase : Dict = {"class_name": models[0].__class__.__name__} with open(os.path.join(A_ , "data.json" ) , "w" ) as f: json.dump(A_ , A_ ) # loading hook def load_config(A_ , A_ ): with open(os.path.join(A_ , "data.json" ) , "r" ) as f: UpperCamelCase : int = json.load(A_ ) UpperCamelCase : List[str] = config["class_name"] UpperCamelCase : List[Any] = accelerator.register_save_state_pre_hook(A_ ) UpperCamelCase : List[Any] = accelerator.register_load_state_pre_hook(A_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(A_ ) # make sure random weights don't match with hooks load_random_weights(A_ ) self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 ) # random class name to verify correct one is loaded UpperCamelCase : Optional[Any] = "random" # make sure loaded weights match with hooks accelerator.load_state(A_ ) self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(A_ ) # make sure random weights don't match with hooks removed load_random_weights(A_ ) self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 ) # random class name to verify correct one is loaded UpperCamelCase : int = "random" # make sure loaded weights match with hooks removed accelerator.load_state(A_ ) self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = create_components() UpperCamelCase : Dict = None # This should work UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = accelerator.prepare( A_ , A_ , A_ , A_ , A_ , A_ ) self.assertTrue(dummy_obj is None ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = Accelerator() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = create_components() UpperCamelCase : int = [1, 2, 3] # This should work UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = accelerator.prepare( A_ , A_ , A_ , A_ , A_ , A_ ) self.assertEqual( getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def __UpperCamelCase( self ): '''simple docstring''' from transformers import AutoModelForCausalLM UpperCamelCase : Any = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=A_ , device_map={"": 0} , ) UpperCamelCase : Any = Accelerator() # This should work UpperCamelCase : str = accelerator.prepare(A_ ) @slow @require_bnb def __UpperCamelCase( self ): '''simple docstring''' from transformers import AutoModelForCausalLM UpperCamelCase : Optional[Any] = Accelerator() with init_empty_weights(): UpperCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() UpperCamelCase : Tuple = infer_auto_device_map(A_ ) UpperCamelCase : Tuple = "cpu" UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=A_ , load_in_abit=A_ , llm_inta_enable_fpaa_cpu_offload=A_ ) # This should not work and get value error with self.assertRaises(A_ ): UpperCamelCase : Any = accelerator.prepare(A_ ) @slow @require_bnb @require_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' from transformers import AutoModelForCausalLM UpperCamelCase : str = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() UpperCamelCase : Union[str, Any] = infer_auto_device_map(A_ ) UpperCamelCase : int = 1 UpperCamelCase : Any = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=A_ , device_map=A_ , ) UpperCamelCase : List[Any] = Accelerator() # This should not work and get value error with self.assertRaises(A_ ): UpperCamelCase : int = accelerator.prepare(A_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) UpperCamelCase : Optional[int] = infer_auto_device_map(A_ ) UpperCamelCase : Any = 1 UpperCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=A_ , device_map=A_ , ) UpperCamelCase : List[str] = Accelerator() # This should work UpperCamelCase : Optional[Any] = accelerator.prepare(A_ ) @require_cuda def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = torch.nn.Linear(10 , 10 ) UpperCamelCase : Tuple = torch.optim.SGD(model.parameters() , lr=0.01 ) UpperCamelCase : Tuple = Accelerator(cpu=A_ ) UpperCamelCase : str = accelerator.prepare(A_ )
38
from math import loga def A_ ( _lowerCAmelCase ) -> int: if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise TypeError("Input value must be a 'int' type" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
38
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase : int = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
from __future__ import annotations __lowerCamelCase : Optional[int] = """Muhammad Umer Farooq""" __lowerCamelCase : Tuple = """MIT""" __lowerCamelCase : Optional[int] = """1.0.0""" __lowerCamelCase : int = """Muhammad Umer Farooq""" __lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me""" __lowerCamelCase : Dict = """Alpha""" import re from html.parser import HTMLParser from urllib import parse import requests class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' super().__init__() UpperCamelCase : list[str] = [] UpperCamelCase : str = domain def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: UpperCamelCase : Any = parse.urljoin(self.domain , A_ ) self.urls.append(A_ ) def A_ ( _lowerCAmelCase ) -> str: return ".".join(get_sub_domain_name(_lowerCAmelCase ).split("." )[-2:] ) def A_ ( _lowerCAmelCase ) -> str: return parse.urlparse(_lowerCAmelCase ).netloc def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]: UpperCamelCase : int = get_domain_name(_lowerCAmelCase ) # Initialize the parser UpperCamelCase : str = Parser(_lowerCAmelCase ) try: # Open URL UpperCamelCase : int = requests.get(_lowerCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through UpperCamelCase : Optional[Any] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase ) # Get the valid email. UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(_lowerCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Tuple = emails_from_url("""https://github.com""") print(f"""{len(emails)} emails found:""") print("""\n""".join(sorted(emails)))
38
1
import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def A_ ( _lowerCAmelCase ) -> Union[str, Any]: if hor == 128: UpperCamelCase : int = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") UpperCamelCase : Tuple = (32, 128, 256) UpperCamelCase : List[str] = ("UpResnetBlock1D", "UpResnetBlock1D") elif hor == 32: UpperCamelCase : Union[str, Any] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") UpperCamelCase : str = (32, 64, 128, 256) UpperCamelCase : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") UpperCamelCase : List[Any] = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" ) UpperCamelCase : Optional[int] = model.state_dict() UpperCamelCase : Union[str, Any] = { "down_block_types": down_block_types, "block_out_channels": block_out_channels, "up_block_types": up_block_types, "layers_per_block": 1, "use_timestep_embedding": True, "out_block_type": "OutConv1DBlock", "norm_num_groups": 8, "downsample_each_block": False, "in_channels": 14, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "flip_sin_to_cos": False, "freq_shift": 1, "sample_size": 6_5536, "mid_block_type": "MidResTemporalBlock1D", "act_fn": "mish", } UpperCamelCase : int = UNetaDModel(**_lowerCAmelCase ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) UpperCamelCase : List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): UpperCamelCase : Tuple = state_dict.pop(_lowerCAmelCase ) hf_value_function.load_state_dict(_lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" ) with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def A_ ( ) -> int: UpperCamelCase : Tuple = { "in_channels": 14, "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": (), "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": (32, 64, 128, 256), "layers_per_block": 1, "downsample_each_block": True, "sample_size": 6_5536, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1, "norm_num_groups": 8, "act_fn": "mish", } UpperCamelCase : Tuple = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" ) UpperCamelCase : int = model UpperCamelCase : Tuple = UNetaDModel(**_lowerCAmelCase ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) UpperCamelCase : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): UpperCamelCase : Union[str, Any] = state_dict.pop(_lowerCAmelCase ) hf_value_function.load_state_dict(_lowerCAmelCase ) torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" ) with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
38
from __future__ import annotations def A_ ( _lowerCAmelCase ) -> list[int]: UpperCamelCase : Optional[Any] = [True] * limit UpperCamelCase : Optional[Any] = False UpperCamelCase : List[str] = False UpperCamelCase : Tuple = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase : Optional[Any] = i * 2 while index < limit: UpperCamelCase : int = False UpperCamelCase : Optional[int] = index + i UpperCamelCase : Any = [2] for i in range(3 , _lowerCAmelCase , 2 ): if is_prime[i]: primes.append(_lowerCAmelCase ) return primes def A_ ( _lowerCAmelCase = 100_0000 ) -> int: UpperCamelCase : Union[str, Any] = prime_sieve(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = 0 for i in range(len(_lowerCAmelCase ) ): for j in range(i + length , len(_lowerCAmelCase ) ): UpperCamelCase : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase : int = j - i UpperCamelCase : Dict = sol return largest if __name__ == "__main__": print(f"""{solution() = }""")
38
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def A_ ( _lowerCAmelCase ) -> Tuple: UpperCamelCase : Any = botoa.client("iam" ) UpperCamelCase : Any = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=_lowerCAmelCase , AssumeRolePolicyDocument=json.dumps(_lowerCAmelCase , indent=2 ) ) UpperCamelCase : Dict = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:*", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents", "s3:CreateBucket", "s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject", "s3:PutObject", ], "Resource": "*", } ], } # attach policy to role iam_client.put_role_policy( RoleName=_lowerCAmelCase , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(_lowerCAmelCase , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(F"""role {role_name} already exists. Using existing one""" ) def A_ ( _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : int = botoa.client("iam" ) return iam_client.get_role(RoleName=_lowerCAmelCase )["Role"]["Arn"] def A_ ( ) -> Dict: UpperCamelCase : str = _ask_options( "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , _lowerCAmelCase , ) UpperCamelCase : Any = None if credentials_configuration == 0: UpperCamelCase : int = _ask_field("Enter your AWS Profile name: [default] " , default="default" ) UpperCamelCase : Optional[Any] = aws_profile else: print( "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" ) UpperCamelCase : str = _ask_field("AWS Access Key ID: " ) UpperCamelCase : List[str] = aws_access_key_id UpperCamelCase : Optional[int] = _ask_field("AWS Secret Access Key: " ) UpperCamelCase : Optional[int] = aws_secret_access_key UpperCamelCase : Union[str, Any] = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" ) UpperCamelCase : Optional[int] = aws_region UpperCamelCase : int = _ask_options( "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , _lowerCAmelCase , ) if role_management == 0: UpperCamelCase : List[str] = _ask_field("Enter your IAM role name: " ) else: UpperCamelCase : Tuple = "accelerate_sagemaker_execution_role" print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" ) _create_iam_role_for_sagemaker(_lowerCAmelCase ) UpperCamelCase : str = _ask_field( "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) UpperCamelCase : Union[str, Any] = None if is_custom_docker_image: UpperCamelCase : Optional[Any] = _ask_field("Enter your Docker image: " , lambda _lowerCAmelCase : str(_lowerCAmelCase ).lower() ) UpperCamelCase : int = _ask_field( "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) UpperCamelCase : Any = None if is_sagemaker_inputs_enabled: UpperCamelCase : str = _ask_field( "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda _lowerCAmelCase : str(_lowerCAmelCase ).lower() , ) UpperCamelCase : Optional[Any] = _ask_field( "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) UpperCamelCase : int = None if is_sagemaker_metrics_enabled: UpperCamelCase : Dict = _ask_field( "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda _lowerCAmelCase : str(_lowerCAmelCase ).lower() , ) UpperCamelCase : List[Any] = _ask_options( "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , ) UpperCamelCase : Dict = {} UpperCamelCase : int = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) if use_dynamo: UpperCamelCase : Optional[Any] = "dynamo_" UpperCamelCase : Tuple = _ask_options( "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) UpperCamelCase : Tuple = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) if use_custom_options: UpperCamelCase : Any = _ask_options( "Which mode do you want to use?" , _lowerCAmelCase , lambda _lowerCAmelCase : TORCH_DYNAMO_MODES[int(_lowerCAmelCase )] , default="default" , ) UpperCamelCase : str = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) UpperCamelCase : int = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=_lowerCAmelCase , error_message="Please enter yes or no." , ) UpperCamelCase : Any = "Which EC2 instance type you want to use for your training?" if distributed_type != SageMakerDistributedType.NO: UpperCamelCase : Union[str, Any] = _ask_options( _lowerCAmelCase , _lowerCAmelCase , lambda _lowerCAmelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_lowerCAmelCase )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" UpperCamelCase : Union[str, Any] = _ask_field(_lowerCAmelCase , lambda _lowerCAmelCase : str(_lowerCAmelCase ).lower() , default="ml.p3.2xlarge" ) UpperCamelCase : Any = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): UpperCamelCase : Union[str, Any] = _ask_field( "How many machines do you want use? [1]: " , _lowerCAmelCase , default=1 , ) UpperCamelCase : Optional[int] = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." ) return SageMakerConfig( image_uri=_lowerCAmelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_lowerCAmelCase , use_cpu=_lowerCAmelCase , dynamo_config=_lowerCAmelCase , eca_instance_type=_lowerCAmelCase , profile=_lowerCAmelCase , region=_lowerCAmelCase , iam_role_name=_lowerCAmelCase , mixed_precision=_lowerCAmelCase , num_machines=_lowerCAmelCase , sagemaker_inputs_file=_lowerCAmelCase , sagemaker_metrics_file=_lowerCAmelCase , )
38
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class A__ ( __snake_case ): def __init__( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ): '''simple docstring''' super().__init__( features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , ) UpperCamelCase : Optional[int] = Generator( cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , ) def __UpperCamelCase( self ): '''simple docstring''' if self.streaming: UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" ) # Build regular (map-style) dataset else: UpperCamelCase : Union[str, Any] = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : List[Any] = None UpperCamelCase : List[str] = None self.builder.download_and_prepare( download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , ) UpperCamelCase : int = self.builder.as_dataset( split="train" , verification_mode=A_ , in_memory=self.keep_in_memory ) return dataset
38
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Union[str, Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def A_ ( _lowerCAmelCase ) -> Union[str, Any]: # picklable for multiprocessing return x.sum() def A_ ( _lowerCAmelCase ) -> Optional[Any]: # picklable for multiprocessing return i + 1 @dataclass class A__ : _UpperCAmelCase :int _UpperCAmelCase :str class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = {} UpperCamelCase : Optional[Any] = [] UpperCamelCase : List[Any] = 1 UpperCamelCase : Tuple = [1, 2] UpperCamelCase : Optional[Any] = {"a": 1, "b": 2} UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]} UpperCamelCase : Any = {"a": {"1": 1}, "b": 2} UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4} UpperCamelCase : Dict = {} UpperCamelCase : Any = [] UpperCamelCase : Any = 2 UpperCamelCase : Any = [2, 3] UpperCamelCase : Optional[Any] = {"a": 2, "b": 3} UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]} UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3} UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) UpperCamelCase : List[str] = 2 self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} UpperCamelCase : int = {"a": 2, "b": 0, "c": 2} UpperCamelCase : Union[str, Any] = { "a": np.eye(2 ).astype(A_ ), "b": np.zeros(3 ).astype(A_ ), "c": np.ones(2 ).astype(A_ ), } self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(A_ ): # can't pickle a local lambda map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = {"a": 1, "b": 2} UpperCamelCase : List[Any] = {"a": 3, "b": 4} UpperCamelCase : Tuple = {"a": 5, "b": 6} UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' class A__ : _UpperCAmelCase :str = 'bar' UpperCamelCase : List[Any] = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(A_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: UpperCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(_lowerCAmelCase )} UpperCamelCase : List[str] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A__ ( __snake_case ): @require_tf def __UpperCamelCase( self ): '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers UpperCamelCase : int = layers.Dense(2 ) def gen_random_output(): UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) ) return model(A_ ).numpy() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : List[Any] = gen_random_output() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : Dict = gen_random_output() UpperCamelCase : Optional[int] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch def gen_random_output(): UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 ) UpperCamelCase : Dict = torch.rand(1 , 3 ) return model(A_ ).detach().numpy() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Dict = gen_random_output() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Optional[int] = gen_random_output() UpperCamelCase : List[Any] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __UpperCamelCase( self ): '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): UpperCamelCase : Optional[Any] = gen_random_output() with temp_seed(42 ): UpperCamelCase : Optional[Any] = gen_random_output() UpperCamelCase : Optional[Any] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" , [{}] ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[Any] = NestedDataStructure(_lowerCAmelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" , [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: UpperCamelCase : Dict = NestedDataStructure(_lowerCAmelCase ).flatten() assert output == expected_output def A_ ( ) -> List[Any]: UpperCamelCase : str = A(x=1 , y="foobar" ) UpperCamelCase : Tuple = {"x": 1, "y": "foobar"} assert asdict(_lowerCAmelCase ) == expected_output UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]} UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(_lowerCAmelCase ) == expected_output with pytest.raises(_lowerCAmelCase ): asdict([1, A(x=10 , y="foo" )] ) def A_ ( _lowerCAmelCase ) -> Tuple: return text.split() def A_ ( _lowerCAmelCase ) -> Dict: yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def A_ ( ) -> str: with Pool(2 ) as pool: UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: UpperCamelCase : Any = [] for yield_time, content in iflatmap_unordered( _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_lowerCAmelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(_lowerCAmelCase ) == 4
38
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Optional[Any] = { """configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""], """tokenization_lxmert""": ["""LxmertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[int] = ["""LxmertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """LxmertEncoder""", """LxmertForPreTraining""", """LxmertForQuestionAnswering""", """LxmertModel""", """LxmertPreTrainedModel""", """LxmertVisualFeatureEncoder""", """LxmertXLayer""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLxmertForPreTraining""", """TFLxmertMainLayer""", """TFLxmertModel""", """TFLxmertPreTrainedModel""", """TFLxmertVisualFeatureEncoder""", ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
from ..utils import DummyObject, requires_backends class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['note_seq'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["note_seq"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["note_seq"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["note_seq"] )
38
1
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class A__ : @staticmethod def __UpperCamelCase( *A_ , **A_ ): '''simple docstring''' pass def A_ ( _lowerCAmelCase ) -> Optional[int]: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __lowerCamelCase : List[str] = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class A__ ( unittest.TestCase ): _UpperCAmelCase :Optional[int] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = pipeline( "document-question-answering" , model=A_ , tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Union[str, Any] = INVOICE_URL UpperCamelCase : List[Any] = list(zip(*apply_tesseract(load_image(A_ ) , A_ , "" ) ) ) UpperCamelCase : Dict = "What is the placebo?" UpperCamelCase : Any = [ { "image": load_image(A_ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Any = dqa_pipeline(A_ , top_k=2 ) self.assertEqual( A_ , [ [ {"score": ANY(A_ ), "answer": ANY(A_ ), "start": ANY(A_ ), "end": ANY(A_ )}, {"score": ANY(A_ ), "answer": ANY(A_ ), "start": ANY(A_ ), "end": ANY(A_ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) UpperCamelCase : List[str] = INVOICE_URL UpperCamelCase : Dict = "How many cats are there?" UpperCamelCase : int = [ {"score": 0.00_01, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.00_01, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] UpperCamelCase : int = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual(nested_simplify(A_ , decimals=4 ) , A_ ) UpperCamelCase : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(A_ , decimals=4 ) , A_ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. # Empty answer probably UpperCamelCase : str = "./tests/fixtures/tests_samples/COCO/000000039769.png" UpperCamelCase : List[Any] = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual(A_ , [] ) # We can optionnally pass directly the words and bounding boxes UpperCamelCase : Optional[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png" UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : str = dqa_pipeline(image=A_ , question=A_ , words=A_ , boxes=A_ , top_k=2 ) self.assertEqual(A_ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) UpperCamelCase : Dict = INVOICE_URL UpperCamelCase : int = "What is the invoice number?" UpperCamelCase : int = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase : Union[str, Any] = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) UpperCamelCase : List[Any] = INVOICE_URL UpperCamelCase : Optional[int] = "What is the invoice number?" UpperCamelCase : Dict = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase : Dict = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=A_ ) UpperCamelCase : str = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=A_ , revision="3dc6de3" , ) UpperCamelCase : Tuple = INVOICE_URL UpperCamelCase : Any = "What is the invoice number?" UpperCamelCase : Optional[Any] = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] , ) UpperCamelCase : Any = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] , ) UpperCamelCase : Optional[Any] = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) UpperCamelCase : List[str] = list(zip(*apply_tesseract(load_image(A_ ) , A_ , "" ) ) ) # This model should also work if `image` is set to None UpperCamelCase : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=A_ ) UpperCamelCase : List[str] = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=A_ , revision="3dc6de3" , max_seq_len=50 , ) UpperCamelCase : Optional[int] = INVOICE_URL UpperCamelCase : Union[str, Any] = "What is the invoice number?" UpperCamelCase : Dict = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase : str = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) UpperCamelCase : str = list(zip(*apply_tesseract(load_image(A_ ) , A_ , "" ) ) ) # This model should also work if `image` is set to None UpperCamelCase : int = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) UpperCamelCase : Tuple = INVOICE_URL UpperCamelCase : Tuple = "What is the invoice number?" UpperCamelCase : Optional[Any] = dqa_pipeline(image=A_ , question=A_ , top_k=2 ) self.assertEqual(nested_simplify(A_ , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def __UpperCamelCase( self ): '''simple docstring''' pass
38
import math import tensorflow as tf from packaging import version def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : List[Any] = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : Any = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(math.pi , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.044_715 , x.dtype ) UpperCamelCase : int = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_lowerCAmelCase , 3 )) )) return x * cdf def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : str = tf.convert_to_tensor(_lowerCAmelCase ) return x * tf.tanh(tf.math.softplus(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Tuple = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(0.044_715 , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def A_ ( _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : Any = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def A_ ( _lowerCAmelCase ) -> List[Any]: return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 ) def A_ ( _lowerCAmelCase , _lowerCAmelCase=-1 ) -> str: UpperCamelCase , UpperCamelCase : List[Any] = tf.split(_lowerCAmelCase , 2 , axis=_lowerCAmelCase ) return a * tf.math.sigmoid(_lowerCAmelCase ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def A_ ( _lowerCAmelCase ) -> Any: return tf.keras.activations.gelu(_lowerCAmelCase , approximate=_lowerCAmelCase ) __lowerCamelCase : Optional[int] = tf.keras.activations.gelu __lowerCamelCase : int = approximate_gelu_wrap else: __lowerCamelCase : List[Any] = _gelu __lowerCamelCase : Optional[Any] = _gelu_new __lowerCamelCase : Any = { """gelu""": gelu, """gelu_10""": gelu_aa, """gelu_fast""": gelu_fast, """gelu_new""": gelu_new, """glu""": glu, """mish""": mish, """quick_gelu""": quick_gelu, """relu""": tf.keras.activations.relu, """sigmoid""": tf.keras.activations.sigmoid, """silu""": tf.keras.activations.swish, """swish""": tf.keras.activations.swish, """tanh""": tf.keras.activations.tanh, } def A_ ( _lowerCAmelCase ) -> Optional[Any]: if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
38
1
from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup __lowerCamelCase : Any = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def A_ ( _lowerCAmelCase = "mumbai" ) -> Generator[tuple[str, str], None, None]: UpperCamelCase : Tuple = BeautifulSoup(requests.get(url + location ).content , "html.parser" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ): UpperCamelCase : Any = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip() UpperCamelCase : Tuple = job.find("span" , {"class": "company"} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
38
import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :str = KandinskyVaaPipeline _UpperCAmelCase :str = [ 'image_embeds', 'negative_image_embeds', ] _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds'] _UpperCAmelCase :List[str] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :List[str] = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : Dict = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Dict = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( A_ ) if str(A_ ).startswith("mps" ): UpperCamelCase : Optional[Any] = torch.manual_seed(A_ ) else: UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Optional[int] = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "cpu" UpperCamelCase : List[str] = self.get_dummy_components() UpperCamelCase : Tuple = self.pipeline_class(**A_ ) UpperCamelCase : List[str] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : Optional[int] = output.images UpperCamelCase : int = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : Tuple = image[0, -3:, -3:, -1] UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : int = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) UpperCamelCase : Tuple = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : str = "red cat, 4k photo" UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Tuple = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase : Tuple = pipeline( image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A_ , A_ )
38
1
import os from math import logaa def A_ ( _lowerCAmelCase = "base_exp.txt" ) -> int: UpperCamelCase : float = 0 UpperCamelCase : str = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(_lowerCAmelCase ) , _lowerCAmelCase ) ) ): UpperCamelCase , UpperCamelCase : Any = list(map(_lowerCAmelCase , line.split("," ) ) ) if x * logaa(_lowerCAmelCase ) > largest: UpperCamelCase : Optional[int] = x * logaa(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = i + 1 return result if __name__ == "__main__": print(solution())
38
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A_ ( ) -> Dict: UpperCamelCase : Tuple = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=_lowerCAmelCase , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=_lowerCAmelCase ) return parser.parse_args() def A_ ( ) -> Optional[int]: UpperCamelCase : Tuple = parse_args() # Import training_script as a module. UpperCamelCase : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCamelCase : List[Any] = script_fpath.stem UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase ) # Patch sys.argv UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
38
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCamelCase : str = logging.get_logger(__name__) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: UpperCamelCase : List[Any] = b.T UpperCamelCase : Union[str, Any] = np.sum(np.square(_lowerCAmelCase ) , axis=1 ) UpperCamelCase : List[str] = np.sum(np.square(_lowerCAmelCase ) , axis=0 ) UpperCamelCase : Dict = np.matmul(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : List[str] = aa[:, None] - 2 * ab + ba[None, :] return d def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : Dict = x.reshape(-1 , 3 ) UpperCamelCase : List[Any] = squared_euclidean_distance(_lowerCAmelCase , _lowerCAmelCase ) return np.argmin(_lowerCAmelCase , axis=1 ) class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = ['pixel_values'] def __init__( self , A_ = None , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = True , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : List[str] = size if size is not None else {"height": 256, "width": 256} UpperCamelCase : Optional[int] = get_size_dict(A_ ) UpperCamelCase : int = np.array(A_ ) if clusters is not None else None UpperCamelCase : Optional[Any] = do_resize UpperCamelCase : List[str] = size UpperCamelCase : int = resample UpperCamelCase : Dict = do_normalize UpperCamelCase : Any = do_color_quantize def __UpperCamelCase( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Dict = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" ) return resize( A_ , size=(size["height"], size["width"]) , resample=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = rescale(image=A_ , scale=1 / 1_27.5 , data_format=A_ ) UpperCamelCase : Tuple = image - 1 return image def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ): '''simple docstring''' UpperCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize UpperCamelCase : str = size if size is not None else self.size UpperCamelCase : List[str] = get_size_dict(A_ ) UpperCamelCase : Any = resample if resample is not None else self.resample UpperCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize UpperCamelCase : Tuple = clusters if clusters is not None else self.clusters UpperCamelCase : Dict = np.array(A_ ) UpperCamelCase : Optional[Any] = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. UpperCamelCase : int = [to_numpy_array(A_ ) for image in images] if do_resize: UpperCamelCase : str = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_normalize: UpperCamelCase : Any = [self.normalize(image=A_ ) for image in images] if do_color_quantize: UpperCamelCase : Optional[int] = [to_channel_dimension_format(A_ , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) UpperCamelCase : Optional[Any] = np.array(A_ ) UpperCamelCase : str = color_quantize(A_ , A_ ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) UpperCamelCase : int = images.shape[0] UpperCamelCase : Optional[int] = images.reshape(A_ , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. UpperCamelCase : Any = list(A_ ) else: UpperCamelCase : str = [to_channel_dimension_format(A_ , A_ ) for image in images] UpperCamelCase : Dict = {"input_ids": images} return BatchFeature(data=A_ , tensor_type=A_ )
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Union[str, Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class A__ ( __snake_case ): def __init__( self , A_ = None , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Any = path_or_paths UpperCamelCase : Any = split if split or isinstance(A_ , A_ ) else "train" UpperCamelCase : Union[str, Any] = features UpperCamelCase : Dict = cache_dir UpperCamelCase : Dict = keep_in_memory UpperCamelCase : Dict = streaming UpperCamelCase : str = num_proc UpperCamelCase : Union[str, Any] = kwargs @abstractmethod def __UpperCamelCase( self ): '''simple docstring''' pass class A__ ( __snake_case ): def __init__( self , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = features UpperCamelCase : Optional[int] = cache_dir UpperCamelCase : List[str] = keep_in_memory UpperCamelCase : Tuple = streaming UpperCamelCase : List[str] = num_proc UpperCamelCase : Any = kwargs @abstractmethod def __UpperCamelCase( self ): '''simple docstring''' pass
38
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18} UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18} UpperCamelCase : Optional[Any] = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : List[Any] = num_channels UpperCamelCase : Union[str, Any] = num_frames UpperCamelCase : Any = image_size UpperCamelCase : Tuple = min_resolution UpperCamelCase : Optional[Any] = max_resolution UpperCamelCase : Any = do_resize UpperCamelCase : Tuple = size UpperCamelCase : List[Any] = do_normalize UpperCamelCase : Optional[int] = image_mean UpperCamelCase : Any = image_std UpperCamelCase : str = crop_size def __UpperCamelCase( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = VivitImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
38
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : Tuple = { """weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""", } class A__ ( __snake_case ): _UpperCAmelCase :int = 'roc_bert' def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=True , A_=0 , A_="absolute" , A_=None , A_=True , A_=True , A_=768 , A_=910 , A_=512 , A_=2_4858 , A_=True , **A_ , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Tuple = max_position_embeddings UpperCamelCase : str = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[int] = num_attention_heads UpperCamelCase : Any = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : int = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Tuple = type_vocab_size UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Optional[int] = use_cache UpperCamelCase : Tuple = enable_pronunciation UpperCamelCase : Optional[int] = enable_shape UpperCamelCase : Optional[Any] = pronunciation_embed_dim UpperCamelCase : List[str] = pronunciation_vocab_size UpperCamelCase : Optional[int] = shape_embed_dim UpperCamelCase : Tuple = shape_vocab_size UpperCamelCase : Union[str, Any] = concat_input UpperCamelCase : Union[str, Any] = position_embedding_type UpperCamelCase : Tuple = classifier_dropout super().__init__(pad_token_id=A_ , **A_ )
38
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __lowerCamelCase : Dict = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __lowerCamelCase : Tuple = { """facebook/blenderbot_small-90M""": 512, } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , ) UpperCamelCase : Union[str, Any] = add_prefix_space def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Tuple = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
38
1
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = logging.get_logger() # the current default level is logging.WARNING UpperCamelCase : Any = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = logging.get_verbosity() UpperCamelCase : str = logging.get_logger("transformers.models.bart.tokenization_bart" ) UpperCamelCase : Optional[Any] = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(A_ ) as cl: logger.warning(A_ ) self.assertEqual(cl.out , msg + "\n" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(A_ ) as cl: logger.warning(A_ ) self.assertEqual(cl.out , "" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(A_ ) as cl: logger.warning(A_ ) self.assertEqual(cl.out , msg + "\n" ) # restore to the original level logging.set_verbosity(A_ ) @mockenv(TRANSFORMERS_VERBOSITY="error" ) def __UpperCamelCase( self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var UpperCamelCase : List[str] = logging.get_logger("transformers.models.bart.tokenization_bart" ) UpperCamelCase : Tuple = os.getenv("TRANSFORMERS_VERBOSITY" , A_ ) UpperCamelCase : List[str] = logging.log_levels[env_level_str] UpperCamelCase : str = logging.get_verbosity() self.assertEqual( A_ , A_ , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level UpperCamelCase : Any = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error" ) def __UpperCamelCase( self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() UpperCamelCase : Optional[int] = logging.logging.getLogger() with CaptureLogger(A_ ) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart" ) self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out ) # no need to restore as nothing was changed def __UpperCamelCase( self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() UpperCamelCase : Union[str, Any] = logging.get_logger("transformers.models.bart.tokenization_bart" ) UpperCamelCase : List[Any] = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ): # nothing should be logged as env var disables this method with CaptureLogger(A_ ) as cl: logger.warning_advice(A_ ) self.assertEqual(cl.out , "" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(A_ ) as cl: logger.warning_advice(A_ ) self.assertEqual(cl.out , msg + "\n" ) def A_ ( ) -> Optional[Any]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : int = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = arr.split("," ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = [int(self.array[0] )] * len(self.array ) UpperCamelCase : Optional[int] = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): UpperCamelCase : int = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) UpperCamelCase : str = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __lowerCamelCase : List[str] = input("""please input some numbers:""") __lowerCamelCase : Optional[int] = SubArray(whole_array) __lowerCamelCase : Dict = array.solve_sub_array() print(("""the results is:""", re))
38
import logging import os import threading import time try: import warnings except ImportError: __lowerCamelCase : str = None try: import msvcrt except ImportError: __lowerCamelCase : str = None try: import fcntl except ImportError: __lowerCamelCase : List[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __lowerCamelCase : Union[str, Any] = OSError # Data # ------------------------------------------------ __lowerCamelCase : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] __lowerCamelCase : Union[str, Any] = """3.0.12""" __lowerCamelCase : Any = None def A_ ( ) -> List[Any]: global _logger UpperCamelCase : Any = _logger or logging.getLogger(__name__ ) return _logger class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = lock_file return None def __str__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.lock.release() return None class A__ : def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ ) # The path to the lock file. UpperCamelCase : List[Any] = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase : Tuple = None # The default timeout value. UpperCamelCase : Optional[Any] = timeout # We use this lock primarily for the lock counter. UpperCamelCase : Union[str, Any] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase : Dict = 0 return None @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file @property def __UpperCamelCase( self ): '''simple docstring''' return self._timeout @timeout.setter def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = float(A_ ) return None def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file_fd is not None def __UpperCamelCase( self , A_=None , A_=0.05 ): '''simple docstring''' if timeout is None: UpperCamelCase : Optional[Any] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 UpperCamelCase : Dict = id(self ) UpperCamelCase : List[str] = self._lock_file UpperCamelCase : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(A_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase( self , A_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase : List[Any] = id(self ) UpperCamelCase : Dict = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() UpperCamelCase : Dict = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=A_ ) return None def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = os.path.basename(A_ ) if len(A_ ) > max_length and max_length > 0: UpperCamelCase : Optional[int] = os.path.dirname(A_ ) UpperCamelCase : int = str(hash(A_ ) ) UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(A_ , A_ ) else: return path class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase : str = os.open(self._lock_file , A_ ) except OSError: pass else: try: msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(A_ ) else: UpperCamelCase : Optional[Any] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self._lock_file_fd UpperCamelCase : str = None msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 ) os.close(A_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. except OSError: pass return None class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase : int = os.open(self._lock_file , A_ ) try: fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(A_ ) else: UpperCamelCase : List[str] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self._lock_file_fd UpperCamelCase : List[Any] = None fcntl.flock(A_ , fcntl.LOCK_UN ) os.close(A_ ) return None class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase : Optional[int] = os.open(self._lock_file , A_ ) except OSError: pass else: UpperCamelCase : Tuple = fd return None def __UpperCamelCase( self ): '''simple docstring''' os.close(self._lock_file_fd ) UpperCamelCase : str = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __lowerCamelCase : Dict = None if msvcrt: __lowerCamelCase : Any = WindowsFileLock elif fcntl: __lowerCamelCase : Any = UnixFileLock else: __lowerCamelCase : int = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
38
1
import math def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> float: return math.pow(_lowerCAmelCase , 2 ) - a def A_ ( _lowerCAmelCase ) -> float: return 2 * x def A_ ( _lowerCAmelCase ) -> float: UpperCamelCase : Dict = 2.0 while start <= a: UpperCamelCase : Dict = math.pow(_lowerCAmelCase , 2 ) return start def A_ ( _lowerCAmelCase , _lowerCAmelCase = 9999 , _lowerCAmelCase = 0.00_000_000_000_001 ) -> float: if a < 0: raise ValueError("math domain error" ) UpperCamelCase : str = get_initial_point(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): UpperCamelCase : List[str] = value UpperCamelCase : Optional[int] = value - fx(_lowerCAmelCase , _lowerCAmelCase ) / fx_derivative(_lowerCAmelCase ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
38
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> str: if config_name_or_path is None: UpperCamelCase : Dict = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: UpperCamelCase : Tuple = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: UpperCamelCase : Tuple = question_encoder_name_or_path UpperCamelCase : Any = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. UpperCamelCase : Optional[Any] = RagConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Tuple = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : int = gen_config UpperCamelCase : Dict = question_encoder_config UpperCamelCase : Tuple = model_class.from_pretrained_question_encoder_generator( _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase ) rag_model.save_pretrained(_lowerCAmelCase ) # Sanity check. model_class.from_pretrained(_lowerCAmelCase ) # Save tokenizers. UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) __lowerCamelCase : Dict = parser.parse_args() __lowerCamelCase : Dict = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
38
1
from __future__ import annotations def A_ ( _lowerCAmelCase ) -> list[int]: UpperCamelCase : Optional[Any] = [True] * limit UpperCamelCase : Optional[Any] = False UpperCamelCase : List[str] = False UpperCamelCase : Tuple = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase : Optional[Any] = i * 2 while index < limit: UpperCamelCase : int = False UpperCamelCase : Optional[int] = index + i UpperCamelCase : Any = [2] for i in range(3 , _lowerCAmelCase , 2 ): if is_prime[i]: primes.append(_lowerCAmelCase ) return primes def A_ ( _lowerCAmelCase = 100_0000 ) -> int: UpperCamelCase : Union[str, Any] = prime_sieve(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = 0 for i in range(len(_lowerCAmelCase ) ): for j in range(i + length , len(_lowerCAmelCase ) ): UpperCamelCase : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase : int = j - i UpperCamelCase : Dict = sol return largest if __name__ == "__main__": print(f"""{solution() = }""")
38
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : Dict = parent UpperCamelCase : str = 13 UpperCamelCase : int = 7 UpperCamelCase : str = True UpperCamelCase : Dict = True UpperCamelCase : str = True UpperCamelCase : Tuple = True UpperCamelCase : List[str] = 99 UpperCamelCase : Optional[Any] = 384 UpperCamelCase : Tuple = 2 UpperCamelCase : Union[str, Any] = 4 UpperCamelCase : Dict = 37 UpperCamelCase : Any = "gelu" UpperCamelCase : List[Any] = 0.1 UpperCamelCase : int = 0.1 UpperCamelCase : Tuple = 512 UpperCamelCase : List[Any] = 16 UpperCamelCase : int = 2 UpperCamelCase : Dict = 0.02 UpperCamelCase : Optional[Any] = 3 UpperCamelCase : List[Any] = 4 UpperCamelCase : Dict = 128 UpperCamelCase : Optional[Any] = 2 UpperCamelCase : Optional[int] = 9 UpperCamelCase : Optional[int] = 1 UpperCamelCase : Union[str, Any] = None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : str = None if self.use_input_mask: UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None if self.use_token_type_ids: UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Optional[int] = None UpperCamelCase : Optional[int] = None UpperCamelCase : List[Any] = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Any = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel(config=A_ ) UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Optional[int] = [input_ids, input_mask] UpperCamelCase : Any = model(A_ ) UpperCamelCase : int = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ ) UpperCamelCase : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = self.num_choices UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ ) UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ ) UpperCamelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Dict = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase :Any = False _UpperCAmelCase :int = False _UpperCAmelCase :str = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = TFConvBertModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Optional[Any] = True UpperCamelCase : Any = True if hasattr(A_ , "use_cache" ): UpperCamelCase : List[str] = True UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ ) for model_class in self.all_model_classes: UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ ) UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Optional[int] = len(model(A_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_ ) UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" ) UpperCamelCase : Dict = tf.keras.models.load_model(A_ ) UpperCamelCase : str = model(A_ ) if self.is_encoder_decoder: UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"] UpperCamelCase : Any = outputs["encoder_attentions"] else: UpperCamelCase : Any = outputs["hidden_states"] UpperCamelCase : List[str] = outputs["attentions"] self.assertEqual(len(A_ ) , A_ ) UpperCamelCase : int = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A_ ) , A_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Dict = True UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ ) UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ ) def check_decoder_attentions_output(A_ ): UpperCamelCase : Optional[Any] = len(A_ ) self.assertEqual(out_len % 2 , 0 ) UpperCamelCase : Any = outputs.decoder_attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(A_ ): UpperCamelCase : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True UpperCamelCase : List[Any] = False UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) UpperCamelCase : List[str] = len(A_ ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) if self.is_encoder_decoder: UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_decoder_attentions_output(A_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCamelCase : Tuple = True UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) # Check attention is always last and order is fine UpperCamelCase : Optional[int] = True UpperCamelCase : List[str] = True UpperCamelCase : Optional[int] = model_class(A_ ) UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) ) self.assertEqual(model.config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) @require_tf class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase : List[str] = model(A_ )[0] UpperCamelCase : int = [1, 6, 768] self.assertEqual(output.shape , A_ ) UpperCamelCase : List[str] = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
38
1
import functools from typing import Any def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> bool: # Validation if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or len(_lowerCAmelCase ) == 0: raise ValueError("the string should be not empty string" ) if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) > 0 for item in words ): raise ValueError("the words should be a list of non-empty strings" ) # Build trie UpperCamelCase : dict[str, Any] = {} UpperCamelCase : Any = "WORD_KEEPER" for word in words: UpperCamelCase : Optional[int] = trie for c in word: if c not in trie_node: UpperCamelCase : Dict = {} UpperCamelCase : Any = trie_node[c] UpperCamelCase : Any = True UpperCamelCase : Dict = len(_lowerCAmelCase ) # Dynamic programming method @functools.cache def is_breakable(_lowerCAmelCase ) -> bool: if index == len_string: return True UpperCamelCase : Any = trie for i in range(_lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : List[Any] = trie_node.get(string[i] , _lowerCAmelCase ) if trie_node is None: return False if trie_node.get(_lowerCAmelCase , _lowerCAmelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
38
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : str = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'camembert' def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : List[str] = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : str = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = use_cache UpperCamelCase : List[str] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
38
1
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __lowerCamelCase : Union[str, Any] = logging.getLogger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = 'token-classification' def __init__( self , A_ ): '''simple docstring''' if type(A_ ) == dict: UpperCamelCase : Any = Namespace(**A_ ) UpperCamelCase : Tuple = import_module("tasks" ) try: UpperCamelCase : Dict = getattr(A_ , hparams.task_type ) UpperCamelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) UpperCamelCase : List[Any] = self.token_classification_task.get_labels(hparams.labels ) UpperCamelCase : Any = CrossEntropyLoss().ignore_index super().__init__(A_ , len(self.labels ) , self.mode ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return self.model(**A_ ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": UpperCamelCase : str = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids UpperCamelCase : List[str] = self(**A_ ) UpperCamelCase : Optional[int] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.hparams for mode in ["train", "dev", "test"]: UpperCamelCase : str = self._feature_file(A_ ) if os.path.exists(A_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , A_ ) UpperCamelCase : Union[str, Any] = torch.load(A_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) UpperCamelCase : int = self.token_classification_task.read_examples_from_file(args.data_dir , A_ ) UpperCamelCase : Optional[Any] = self.token_classification_task.convert_examples_to_features( A_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("Saving features into cached file %s" , A_ ) torch.save(A_ , A_ ) def __UpperCamelCase( self , A_ , A_ , A_ = False ): '''simple docstring''' UpperCamelCase : Optional[Any] = self._feature_file(A_ ) logger.info("Loading features from cached file %s" , A_ ) UpperCamelCase : str = torch.load(A_ ) UpperCamelCase : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) UpperCamelCase : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: UpperCamelCase : List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: UpperCamelCase : str = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) UpperCamelCase : Tuple = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' """Compute validation""" "" UpperCamelCase : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": UpperCamelCase : List[Any] = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids UpperCamelCase : Optional[Any] = self(**A_ ) UpperCamelCase , UpperCamelCase : Tuple = outputs[:2] UpperCamelCase : Optional[Any] = logits.detach().cpu().numpy() UpperCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = torch.stack([x["val_loss"] for x in outputs] ).mean() UpperCamelCase : List[str] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) UpperCamelCase : List[str] = np.argmax(A_ , axis=2 ) UpperCamelCase : Union[str, Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) UpperCamelCase : List[str] = dict(enumerate(self.labels ) ) UpperCamelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )] UpperCamelCase : List[Any] = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) UpperCamelCase : Union[str, Any] = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(A_ , A_ ), "precision": precision_score(A_ , A_ ), "recall": recall_score(A_ , A_ ), "f1": fa_score(A_ , A_ ), } UpperCamelCase : Tuple = dict(results.items() ) UpperCamelCase : int = results return ret, preds_list, out_label_list def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase : str = self._eval_end(A_ ) UpperCamelCase : Dict = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = self._eval_end(A_ ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 UpperCamelCase : Tuple = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __UpperCamelCase( A_ , A_ ): '''simple docstring''' BaseTransformer.add_model_specific_args(A_ , A_ ) parser.add_argument( "--task_type" , default="NER" , type=A_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" ) parser.add_argument( "--max_seq_length" , default=128 , type=A_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--labels" , default="" , type=A_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , ) parser.add_argument( "--gpus" , default=0 , type=A_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __lowerCamelCase : Union[str, Any] = NERTransformer.add_model_specific_args(parser, os.getcwd()) __lowerCamelCase : Tuple = parser.parse_args() __lowerCamelCase : Any = NERTransformer(args) __lowerCamelCase : Tuple = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __lowerCamelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True)) __lowerCamelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
38
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return int(input_a == input_a == 0 ) def A_ ( ) -> None: print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :str = XLMProphetNetTokenizer _UpperCAmelCase :List[Any] = False _UpperCAmelCase :List[str] = True def __UpperCamelCase( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase : str = XLMProphetNetTokenizer(A_ , keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = "[PAD]" UpperCamelCase : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "[PAD]" ) self.assertEqual(vocab_keys[1] , "[CLS]" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(A_ ) , 1012 ) def __UpperCamelCase( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = XLMProphetNetTokenizer(A_ , keep_accents=A_ ) UpperCamelCase : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(A_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCamelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCamelCase : Dict = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) UpperCamelCase : Any = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def __UpperCamelCase( self ): '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = "Hello World!" UpperCamelCase : str = [3_5389, 6672, 49, 2] self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
38
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer'] _UpperCAmelCase :Tuple = 'BlipImageProcessor' _UpperCAmelCase :Optional[int] = 'AutoTokenizer' def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = False super().__init__(A_ , A_ ) UpperCamelCase : str = self.image_processor def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ): '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: UpperCamelCase : int = self.tokenizer UpperCamelCase : Optional[int] = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) return text_encoding # add pixel_values UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ ) if text is not None: UpperCamelCase : Dict = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) else: UpperCamelCase : Dict = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.batch_decode(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.decode(*A_ , **A_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.tokenizer.model_input_names UpperCamelCase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
38
1
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class A__ ( unittest.TestCase , __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = load_tool("text-to-speech" ) self.tool.setup() def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Dict = self.tool("hey" ) UpperCamelCase : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) ) def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = self.tool("hey" ) UpperCamelCase : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
38
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCamelCase : Dict = logging.get_logger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask'] def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ): '''simple docstring''' super().__init__( feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , ) UpperCamelCase : Optional[int] = spectrogram_length UpperCamelCase : Dict = num_channels UpperCamelCase : Optional[Any] = patch_size UpperCamelCase : str = feature_size // self.patch_size[1] UpperCamelCase : List[str] = n_fft UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate UpperCamelCase : Optional[int] = sampling_rate UpperCamelCase : int = padding_value UpperCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = spectrogram( A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , ) UpperCamelCase : List[Any] = log_spec[:, :-1] UpperCamelCase : Optional[int] = log_spec - 20.0 UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) UpperCamelCase : Union[str, Any] = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase : List[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCamelCase : str = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A_ ): UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCamelCase : List[str] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCamelCase : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa ) # convert into correct format for padding UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCamelCase : List[str] = padded_audio_features * self.padding_value for i in range(len(A_ ) ): UpperCamelCase : Union[str, Any] = audio_features[i] UpperCamelCase : Optional[int] = feature # return as BatchFeature if return_attention_mask: UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: UpperCamelCase : int = {"audio_values": padded_audio_features} UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ ) return encoded_inputs
38
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : int = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
from __future__ import annotations from random import random from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar("""KT""") __lowerCamelCase : Dict = TypeVar("""VT""") class A__ ( Generic[KT, VT] ): def __init__( self , A_ = "root" , A_ = None ): '''simple docstring''' UpperCamelCase : int = key UpperCamelCase : List[Any] = value UpperCamelCase : list[Node[KT, VT]] = [] def __repr__( self ): '''simple docstring''' return F"""Node({self.key}: {self.value})""" @property def __UpperCamelCase( self ): '''simple docstring''' return len(self.forward ) class A__ ( Generic[KT, VT] ): def __init__( self , A_ = 0.5 , A_ = 16 ): '''simple docstring''' UpperCamelCase : Node[KT, VT] = Node[KT, VT]() UpperCamelCase : List[Any] = 0 UpperCamelCase : Union[str, Any] = p UpperCamelCase : List[str] = max_level def __str__( self ): '''simple docstring''' UpperCamelCase : int = list(self ) if len(A_ ) == 0: return F"""SkipList(level={self.level})""" UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 ) UpperCamelCase : Dict = max(A_ , 4 ) + 4 UpperCamelCase : str = self.head UpperCamelCase : List[Any] = [] UpperCamelCase : int = node.forward.copy() lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) ) lines.append(" " * label_size + "| " * len(A_ ) ) while len(node.forward ) != 0: UpperCamelCase : Union[str, Any] = node.forward[0] lines.append( F"""[{node.key}]""".ljust(A_ , "-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(A_ ) ) UpperCamelCase : Tuple = node.forward lines.append("None".ljust(A_ ) + "* " * len(A_ ) ) return F"""SkipList(level={self.level})\n""" + "\n".join(A_ ) def __iter__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.head while len(node.forward ) != 0: yield node.forward[0].key UpperCamelCase : Union[str, Any] = node.forward[0] def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = 1 while random() < self.p and level < self.max_level: level += 1 return level def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = [] UpperCamelCase : List[Any] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: UpperCamelCase : str = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(A_ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ ) if node is not None: for i, update_node in enumerate(A_ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: UpperCamelCase : Tuple = node.forward[i] else: UpperCamelCase : List[Any] = update_node.forward[:i] def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ ) if node is not None: UpperCamelCase : Union[str, Any] = value else: UpperCamelCase : Dict = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , A_ ): update_vector.append(self.head ) UpperCamelCase : Optional[int] = level UpperCamelCase : Dict = Node(A_ , A_ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(A_ ) else: UpperCamelCase : List[Any] = new_node def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ ) if node is not None: return node.value return None def A_ ( ) -> List[Any]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 12 ) skip_list.insert("Key3" , 41 ) skip_list.insert("Key4" , -19 ) UpperCamelCase : Optional[int] = skip_list.head UpperCamelCase : List[str] = {} while node.level != 0: UpperCamelCase : str = node.forward[0] UpperCamelCase : Optional[int] = node.value assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def A_ ( ) -> List[Any]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key1" , 10 ) skip_list.insert("Key1" , 12 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 10 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) skip_list.insert("Key10" , 10 ) UpperCamelCase : Dict = skip_list.head UpperCamelCase : Tuple = {} while node.level != 0: UpperCamelCase : List[str] = node.forward[0] UpperCamelCase : Dict = node.value if len(_lowerCAmelCase ) != 4: print() assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() assert skip_list.find("Some key" ) is None def A_ ( ) -> Tuple: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key2" , 20 ) assert skip_list.find("Key2" ) == 20 skip_list.insert("Some Key" , 10 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 13 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 10 assert skip_list.find("V" ) == 13 def A_ ( ) -> Dict: UpperCamelCase : Optional[int] = SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def A_ ( ) -> Dict: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[str]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 14 assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("Key1" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 15 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 142 ) skip_list.insert("Key2" , 15 ) skip_list.delete("X" ) def traverse_keys(_lowerCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_lowerCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def A_ ( ) -> Union[str, Any]: def is_sorted(_lowerCAmelCase ): return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) ) UpperCamelCase : int = SkipList() for i in range(10 ): skip_list.insert(_lowerCAmelCase , _lowerCAmelCase ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_lowerCAmelCase ) ) def A_ ( ) -> Tuple: for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class A__ ( __snake_case ): _UpperCAmelCase :torch.FloatTensor _UpperCAmelCase :torch.FloatTensor _UpperCAmelCase :Optional[torch.FloatTensor] = None class A__ ( __snake_case , __snake_case ): _UpperCAmelCase :int = 2 @register_to_config def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.0_07 , A_ = 80 , A_ = 0.05 , A_ = 50 , ): '''simple docstring''' UpperCamelCase : List[Any] = sigma_max # setable values UpperCamelCase : int = None UpperCamelCase : np.IntTensor = None UpperCamelCase : torch.FloatTensor = None # sigma(t_i) def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' return sample def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : List[Any] = num_inference_steps UpperCamelCase : List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() UpperCamelCase : List[Any] = torch.from_numpy(A_ ).to(A_ ) UpperCamelCase : Dict = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] UpperCamelCase : int = torch.tensor(A_ , dtype=torch.floataa , device=A_ ) def __UpperCamelCase( self , A_ , A_ , A_ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: UpperCamelCase : Union[str, Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: UpperCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCamelCase : Tuple = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device ) UpperCamelCase : List[str] = sigma + gamma * sigma UpperCamelCase : List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ = True , ): '''simple docstring''' UpperCamelCase : List[Any] = sample_hat + sigma_hat * model_output UpperCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat UpperCamelCase : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A_ , derivative=A_ , pred_original_sample=A_ ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ): '''simple docstring''' UpperCamelCase : List[Any] = sample_prev + sigma_prev * model_output UpperCamelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev UpperCamelCase : List[str] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A_ , derivative=A_ , pred_original_sample=A_ ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' raise NotImplementedError()
38
from PIL import Image def A_ ( _lowerCAmelCase ) -> Image: UpperCamelCase , UpperCamelCase : List[Any] = image.size UpperCamelCase : Union[str, Any] = 0 UpperCamelCase : List[str] = image.load() for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): UpperCamelCase : List[Any] = pixels[j, i] mean += pixel mean //= width * height for j in range(_lowerCAmelCase ): for i in range(_lowerCAmelCase ): UpperCamelCase : Union[str, Any] = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
38
1
from ..utils import DummyObject, requires_backends class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Any = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :int = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :int = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[int] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(_lowerCAmelCase , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple: requires_backends(_lowerCAmelCase , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(_lowerCAmelCase , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(_lowerCAmelCase , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(_lowerCAmelCase , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]: requires_backends(_lowerCAmelCase , ["torch"] ) def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(_lowerCAmelCase , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :int = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Any = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Union[str, Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[int] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :int = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[int] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Any = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Any = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :int = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[int] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :List[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :List[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :int = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :List[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Union[str, Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Union[str, Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :List[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Dict = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :str = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Union[str, Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Optional[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) class A__ ( metaclass=__snake_case ): _UpperCAmelCase :List[Any] = ['torch'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["torch"] )
38
from math import loga def A_ ( _lowerCAmelCase ) -> int: if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise TypeError("Input value must be a 'int' type" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
38
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" ) UpperCamelCase : Optional[int] = { "input_ids": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ), # "My dog is cute" "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } UpperCamelCase : Optional[Any] = model(A_ )["last_hidden_state"] UpperCamelCase : Optional[int] = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , A_ ) # compare the actual values for a slice. UpperCamelCase : Any = tf.convert_to_tensor( [ [ [0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04], [-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44], [-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
38
from __future__ import annotations __lowerCamelCase : Optional[int] = """Muhammad Umer Farooq""" __lowerCamelCase : Tuple = """MIT""" __lowerCamelCase : Optional[int] = """1.0.0""" __lowerCamelCase : int = """Muhammad Umer Farooq""" __lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me""" __lowerCamelCase : Dict = """Alpha""" import re from html.parser import HTMLParser from urllib import parse import requests class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' super().__init__() UpperCamelCase : list[str] = [] UpperCamelCase : str = domain def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: UpperCamelCase : Any = parse.urljoin(self.domain , A_ ) self.urls.append(A_ ) def A_ ( _lowerCAmelCase ) -> str: return ".".join(get_sub_domain_name(_lowerCAmelCase ).split("." )[-2:] ) def A_ ( _lowerCAmelCase ) -> str: return parse.urlparse(_lowerCAmelCase ).netloc def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]: UpperCamelCase : int = get_domain_name(_lowerCAmelCase ) # Initialize the parser UpperCamelCase : str = Parser(_lowerCAmelCase ) try: # Open URL UpperCamelCase : int = requests.get(_lowerCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through UpperCamelCase : Optional[Any] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase ) # Get the valid email. UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(_lowerCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Tuple = emails_from_url("""https://github.com""") print(f"""{len(emails)} emails found:""") print("""\n""".join(sorted(emails)))
38
1
import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]), ({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCAmelCase , i + 1 ) for i in range(10 )]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: UpperCamelCase : Any = _distribute_shards(**_lowerCAmelCase ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : List[str] = _split_gen_kwargs(_lowerCAmelCase , _lowerCAmelCase ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: if expected is RuntimeError: with pytest.raises(_lowerCAmelCase ): _number_of_shards_in_gen_kwargs(_lowerCAmelCase ) else: UpperCamelCase : Optional[int] = _number_of_shards_in_gen_kwargs(_lowerCAmelCase ) assert out == expected
38
from __future__ import annotations def A_ ( _lowerCAmelCase ) -> list[int]: UpperCamelCase : Optional[Any] = [True] * limit UpperCamelCase : Optional[Any] = False UpperCamelCase : List[str] = False UpperCamelCase : Tuple = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCamelCase : Optional[Any] = i * 2 while index < limit: UpperCamelCase : int = False UpperCamelCase : Optional[int] = index + i UpperCamelCase : Any = [2] for i in range(3 , _lowerCAmelCase , 2 ): if is_prime[i]: primes.append(_lowerCAmelCase ) return primes def A_ ( _lowerCAmelCase = 100_0000 ) -> int: UpperCamelCase : Union[str, Any] = prime_sieve(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = 0 for i in range(len(_lowerCAmelCase ) ): for j in range(i + length , len(_lowerCAmelCase ) ): UpperCamelCase : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCamelCase : int = j - i UpperCamelCase : Dict = sol return largest if __name__ == "__main__": print(f"""{solution() = }""")
38
1
from __future__ import annotations __lowerCamelCase : Tuple = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class A__ : def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = graph # mapping node to its parent in resulting breadth first tree UpperCamelCase : dict[str, str | None] = {} UpperCamelCase : Dict = source_vertex def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = {self.source_vertex} UpperCamelCase : Union[str, Any] = None UpperCamelCase : List[Any] = [self.source_vertex] # first in first out queue while queue: UpperCamelCase : List[str] = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(A_ ) UpperCamelCase : Dict = vertex queue.append(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex UpperCamelCase : Any = self.parent.get(A_ ) if target_vertex_parent is None: UpperCamelCase : List[str] = ( F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}""" ) raise ValueError(A_ ) return self.shortest_path(A_ ) + F"""->{target_vertex}""" if __name__ == "__main__": __lowerCamelCase : Tuple = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
38
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class A__ ( __snake_case ): def __init__( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ): '''simple docstring''' super().__init__( features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , ) UpperCamelCase : Optional[int] = Generator( cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , ) def __UpperCamelCase( self ): '''simple docstring''' if self.streaming: UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" ) # Build regular (map-style) dataset else: UpperCamelCase : Union[str, Any] = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : List[Any] = None UpperCamelCase : List[str] = None self.builder.download_and_prepare( download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , ) UpperCamelCase : int = self.builder.as_dataset( split="train" , verification_mode=A_ , in_memory=self.keep_in_memory ) return dataset
38
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = [[1, 2, 4], [1, 2, 3, 4]] UpperCamelCase : Dict = DisjunctiveConstraint(A_ ) self.assertTrue(isinstance(dc.token_ids , A_ ) ) with self.assertRaises(A_ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(A_ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(A_ ): DisjunctiveConstraint(A_ ) # fails here def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = [[1, 2, 3], [1, 2, 4]] UpperCamelCase : str = DisjunctiveConstraint(A_ ) UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = dc.update(1 ) UpperCamelCase : Dict = stepped is True and completed is False and reset is False self.assertTrue(A_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = dc.update(2 ) UpperCamelCase : List[Any] = stepped is True and completed is False and reset is False self.assertTrue(A_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = dc.update(3 ) UpperCamelCase : Optional[int] = stepped is True and completed is True and reset is False self.assertTrue(A_ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCamelCase : Any = DisjunctiveConstraint(A_ ) UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
38
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def A_ ( _lowerCAmelCase ) -> Union[str, Any]: # picklable for multiprocessing return x.sum() def A_ ( _lowerCAmelCase ) -> Optional[Any]: # picklable for multiprocessing return i + 1 @dataclass class A__ : _UpperCAmelCase :int _UpperCAmelCase :str class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = {} UpperCamelCase : Optional[Any] = [] UpperCamelCase : List[Any] = 1 UpperCamelCase : Tuple = [1, 2] UpperCamelCase : Optional[Any] = {"a": 1, "b": 2} UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]} UpperCamelCase : Any = {"a": {"1": 1}, "b": 2} UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4} UpperCamelCase : Dict = {} UpperCamelCase : Any = [] UpperCamelCase : Any = 2 UpperCamelCase : Any = [2, 3] UpperCamelCase : Optional[Any] = {"a": 2, "b": 3} UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]} UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3} UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) UpperCamelCase : List[str] = 2 self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} UpperCamelCase : int = {"a": 2, "b": 0, "c": 2} UpperCamelCase : Union[str, Any] = { "a": np.eye(2 ).astype(A_ ), "b": np.zeros(3 ).astype(A_ ), "c": np.ones(2 ).astype(A_ ), } self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(A_ ): # can't pickle a local lambda map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = {"a": 1, "b": 2} UpperCamelCase : List[Any] = {"a": 3, "b": 4} UpperCamelCase : Tuple = {"a": 5, "b": 6} UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' class A__ : _UpperCAmelCase :str = 'bar' UpperCamelCase : List[Any] = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(A_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: UpperCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(_lowerCAmelCase )} UpperCamelCase : List[str] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A__ ( __snake_case ): @require_tf def __UpperCamelCase( self ): '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers UpperCamelCase : int = layers.Dense(2 ) def gen_random_output(): UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) ) return model(A_ ).numpy() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : List[Any] = gen_random_output() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : Dict = gen_random_output() UpperCamelCase : Optional[int] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch def gen_random_output(): UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 ) UpperCamelCase : Dict = torch.rand(1 , 3 ) return model(A_ ).detach().numpy() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Dict = gen_random_output() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Optional[int] = gen_random_output() UpperCamelCase : List[Any] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __UpperCamelCase( self ): '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): UpperCamelCase : Optional[Any] = gen_random_output() with temp_seed(42 ): UpperCamelCase : Optional[Any] = gen_random_output() UpperCamelCase : Optional[Any] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" , [{}] ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[Any] = NestedDataStructure(_lowerCAmelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" , [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: UpperCamelCase : Dict = NestedDataStructure(_lowerCAmelCase ).flatten() assert output == expected_output def A_ ( ) -> List[Any]: UpperCamelCase : str = A(x=1 , y="foobar" ) UpperCamelCase : Tuple = {"x": 1, "y": "foobar"} assert asdict(_lowerCAmelCase ) == expected_output UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]} UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(_lowerCAmelCase ) == expected_output with pytest.raises(_lowerCAmelCase ): asdict([1, A(x=10 , y="foo" )] ) def A_ ( _lowerCAmelCase ) -> Tuple: return text.split() def A_ ( _lowerCAmelCase ) -> Dict: yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def A_ ( ) -> str: with Pool(2 ) as pool: UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: UpperCamelCase : Any = [] for yield_time, content in iflatmap_unordered( _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_lowerCAmelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(_lowerCAmelCase ) == 4
38
1
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class A__ ( __snake_case ): _UpperCAmelCase :Any = 'Wav2Vec2FeatureExtractor' _UpperCAmelCase :List[str] = 'AutoTokenizer' def __init__( self , A_ , A_ ): '''simple docstring''' super().__init__(A_ , A_ ) UpperCamelCase : Union[str, Any] = self.feature_extractor UpperCamelCase : Optional[int] = False @classmethod def __UpperCamelCase( cls , A_ , **A_ ): '''simple docstring''' try: return super().from_pretrained(A_ , **A_ ) except OSError: warnings.warn( F"""Loading a tokenizer inside {cls.__name__} from a config that does not""" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , A_ , ) UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(A_ , **A_ ) UpperCamelCase : Any = WavaVecaCTCTokenizer.from_pretrained(A_ , **A_ ) return cls(feature_extractor=A_ , tokenizer=A_ ) def __call__( self , *A_ , **A_ ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*A_ , **A_ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) UpperCamelCase : int = kwargs.pop("raw_speech" ) else: UpperCamelCase : Any = kwargs.pop("audio" , A_ ) UpperCamelCase : str = kwargs.pop("sampling_rate" , A_ ) UpperCamelCase : Dict = kwargs.pop("text" , A_ ) if len(A_ ) > 0: UpperCamelCase : str = args[0] UpperCamelCase : str = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: UpperCamelCase : str = self.feature_extractor(A_ , *A_ , sampling_rate=A_ , **A_ ) if text is not None: UpperCamelCase : Optional[int] = self.tokenizer(A_ , **A_ ) if text is None: return inputs elif audio is None: return encodings else: UpperCamelCase : Union[str, Any] = encodings["input_ids"] return inputs def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*A_ , **A_ ) UpperCamelCase : int = kwargs.pop("input_features" , A_ ) UpperCamelCase : Optional[int] = kwargs.pop("labels" , A_ ) if len(A_ ) > 0: UpperCamelCase : Optional[int] = args[0] UpperCamelCase : Tuple = args[1:] if input_features is not None: UpperCamelCase : Any = self.feature_extractor.pad(A_ , *A_ , **A_ ) if labels is not None: UpperCamelCase : Dict = self.tokenizer.pad(A_ , **A_ ) if labels is None: return input_features elif input_features is None: return labels else: UpperCamelCase : Optional[int] = labels["input_ids"] return input_features def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.batch_decode(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.decode(*A_ , **A_ ) @contextmanager def __UpperCamelCase( self ): '''simple docstring''' warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) UpperCamelCase : Optional[Any] = True UpperCamelCase : Any = self.tokenizer yield UpperCamelCase : Dict = self.feature_extractor UpperCamelCase : Dict = False
38
from ..utils import DummyObject, requires_backends class A__ ( metaclass=__snake_case ): _UpperCAmelCase :Tuple = ['note_seq'] def __init__( self , *A_ , **A_ ): '''simple docstring''' requires_backends(self , ["note_seq"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["note_seq"] ) @classmethod def __UpperCamelCase( cls , *A_ , **A_ ): '''simple docstring''' requires_backends(cls , ["note_seq"] )
38
1
__lowerCamelCase : List[Any] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ __lowerCamelCase : Optional[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] __lowerCamelCase : List[Any] = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
38
import math import tensorflow as tf from packaging import version def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : List[Any] = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : Any = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(math.pi , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.044_715 , x.dtype ) UpperCamelCase : int = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_lowerCAmelCase , 3 )) )) return x * cdf def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : str = tf.convert_to_tensor(_lowerCAmelCase ) return x * tf.tanh(tf.math.softplus(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Tuple = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(0.044_715 , x.dtype ) UpperCamelCase : Optional[Any] = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def A_ ( _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : Any = tf.convert_to_tensor(_lowerCAmelCase ) UpperCamelCase : List[Any] = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def A_ ( _lowerCAmelCase ) -> List[Any]: return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 ) def A_ ( _lowerCAmelCase , _lowerCAmelCase=-1 ) -> str: UpperCamelCase , UpperCamelCase : List[Any] = tf.split(_lowerCAmelCase , 2 , axis=_lowerCAmelCase ) return a * tf.math.sigmoid(_lowerCAmelCase ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def A_ ( _lowerCAmelCase ) -> Any: return tf.keras.activations.gelu(_lowerCAmelCase , approximate=_lowerCAmelCase ) __lowerCamelCase : Optional[int] = tf.keras.activations.gelu __lowerCamelCase : int = approximate_gelu_wrap else: __lowerCamelCase : List[Any] = _gelu __lowerCamelCase : Optional[Any] = _gelu_new __lowerCamelCase : Any = { """gelu""": gelu, """gelu_10""": gelu_aa, """gelu_fast""": gelu_fast, """gelu_new""": gelu_new, """glu""": glu, """mish""": mish, """quick_gelu""": quick_gelu, """relu""": tf.keras.activations.relu, """sigmoid""": tf.keras.activations.sigmoid, """silu""": tf.keras.activations.swish, """swish""": tf.keras.activations.swish, """tanh""": tf.keras.activations.tanh, } def A_ ( _lowerCAmelCase ) -> Optional[Any]: if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
38
1
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=50 , A_=0.02 , A_=True , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = parent UpperCamelCase : List[str] = batch_size UpperCamelCase : Optional[int] = seq_length UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : List[Any] = use_input_mask UpperCamelCase : str = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Union[str, Any] = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : List[str] = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : List[str] = attention_probs_dropout_prob UpperCamelCase : Tuple = max_position_embeddings UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Union[str, Any] = use_labels UpperCamelCase : str = scope def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : List[str] = None if self.use_input_mask: UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def __UpperCamelCase( self ): '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A_ , initializer_range=self.initializer_range , ) def __UpperCamelCase( self ): '''simple docstring''' ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Dict = self.prepare_config_and_inputs() UpperCamelCase : Optional[Any] = True UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , **A_ , ): '''simple docstring''' UpperCamelCase : Optional[Any] = BertGenerationEncoder(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model(A_ , attention_mask=A_ ) UpperCamelCase : int = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ): '''simple docstring''' UpperCamelCase : str = True UpperCamelCase : Tuple = BertGenerationEncoder(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) UpperCamelCase : Optional[int] = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = True UpperCamelCase : Tuple = True UpperCamelCase : Optional[int] = BertGenerationDecoder(config=A_ ).to(A_ ).eval() # first forward pass UpperCamelCase : str = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) UpperCamelCase : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase : List[str] = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )["hidden_states"][0] UpperCamelCase : Union[str, Any] = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )["hidden_states"][0] # select random slice UpperCamelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , *A_ , ): '''simple docstring''' UpperCamelCase : Any = BertGenerationDecoder(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _UpperCAmelCase :Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () _UpperCAmelCase :List[str] = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = BertGenerationEncoderTester(self ) UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() UpperCamelCase : Dict = "bert" self.model_tester.create_and_check_model(A_ , A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase : List[str] = None self.model_tester.create_and_check_model_as_decoder( A_ , A_ , A_ , A_ , A_ , A_ , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(A_ ) @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) UpperCamelCase : Tuple = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): UpperCamelCase : Union[str, Any] = model(A_ )[0] UpperCamelCase : List[str] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , A_ ) UpperCamelCase : int = torch.tensor( [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) ) @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) UpperCamelCase : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): UpperCamelCase : int = model(A_ )[0] UpperCamelCase : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape , A_ ) UpperCamelCase : List[Any] = torch.tensor( [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
38
import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :str = KandinskyVaaPipeline _UpperCAmelCase :str = [ 'image_embeds', 'negative_image_embeds', ] _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds'] _UpperCAmelCase :List[str] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :List[str] = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : Dict = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Dict = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( A_ ) if str(A_ ).startswith("mps" ): UpperCamelCase : Optional[Any] = torch.manual_seed(A_ ) else: UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Optional[int] = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "cpu" UpperCamelCase : List[str] = self.get_dummy_components() UpperCamelCase : Tuple = self.pipeline_class(**A_ ) UpperCamelCase : List[str] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : Optional[int] = output.images UpperCamelCase : int = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : Tuple = image[0, -3:, -3:, -1] UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : int = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) UpperCamelCase : Tuple = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : str = "red cat, 4k photo" UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Tuple = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase : Tuple = pipeline( image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A_ , A_ )
38
1
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A_ , "embed_dim" ) ) self.parent.assertTrue(hasattr(A_ , "num_heads" ) ) class A__ : def __init__( self , A_ , A_=13 , A_=64 , A_=3 , A_=[16, 48, 96] , A_=[1, 3, 6] , A_=[1, 2, 10] , A_=[7, 3, 3] , A_=[4, 2, 2] , A_=[2, 1, 1] , A_=[2, 2, 2] , A_=[False, False, True] , A_=[0.0, 0.0, 0.0] , A_=0.02 , A_=1e-12 , A_=True , A_=True , A_=2 , ): '''simple docstring''' UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : Dict = image_size UpperCamelCase : Tuple = patch_sizes UpperCamelCase : Optional[Any] = patch_stride UpperCamelCase : Tuple = patch_padding UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : int = use_labels UpperCamelCase : List[Any] = num_labels UpperCamelCase : Any = num_channels UpperCamelCase : Optional[Any] = embed_dim UpperCamelCase : int = num_heads UpperCamelCase : Union[str, Any] = stride_kv UpperCamelCase : Optional[int] = depth UpperCamelCase : Any = cls_token UpperCamelCase : List[Any] = attention_drop_rate UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Any = layer_norm_eps def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Optional[int] = None if self.use_labels: # create a random int32 tensor of given shape UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase : str = self.get_config() return config, pixel_values, labels def __UpperCamelCase( self ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Any = TFCvtModel(config=A_ ) UpperCamelCase : Any = model(A_ , training=A_ ) UpperCamelCase : Union[str, Any] = (self.image_size, self.image_size) UpperCamelCase , UpperCamelCase : List[str] = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCamelCase : str = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCamelCase : Any = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.num_labels UpperCamelCase : Optional[Any] = TFCvtForImageClassification(A_ ) UpperCamelCase : Optional[int] = model(A_ , labels=A_ , training=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs UpperCamelCase : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :int = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () _UpperCAmelCase :Any = ( {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase :Optional[int] = False _UpperCAmelCase :str = False _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :str = False _UpperCAmelCase :Tuple = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = TFCvtModelTester(self ) UpperCamelCase : Union[str, Any] = TFCvtConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def __UpperCamelCase( self ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def __UpperCamelCase( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = tf.keras.mixed_precision.Policy("mixed_float16" ) tf.keras.mixed_precision.set_global_policy(A_ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Optional[Any] = model_class(A_ ) UpperCamelCase : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : int = [*signature.parameters.keys()] UpperCamelCase : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' def check_hidden_states_output(A_ , A_ , A_ ): UpperCamelCase : str = model_class(A_ ) UpperCamelCase : List[str] = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase : Dict = outputs.hidden_states UpperCamelCase : int = len(self.model_tester.depth ) self.assertEqual(len(A_ ) , A_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Dict = True check_hidden_states_output(A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : List[str] = TFCvtModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def A_ ( ) -> Dict: UpperCamelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class A__ ( unittest.TestCase ): @cached_property def __UpperCamelCase( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase : Optional[int] = self.default_image_processor UpperCamelCase : List[str] = prepare_img() UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="tf" ) # forward pass UpperCamelCase : Dict = model(**A_ ) # verify the logits UpperCamelCase : Optional[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase : int = tf.constant([0.92_85, 0.90_15, -0.31_50] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A_ , atol=1e-4 ) )
38
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A_ ( ) -> Dict: UpperCamelCase : Tuple = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=_lowerCAmelCase , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=_lowerCAmelCase ) return parser.parse_args() def A_ ( ) -> Optional[int]: UpperCamelCase : Tuple = parse_args() # Import training_script as a module. UpperCamelCase : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCamelCase : List[Any] = script_fpath.stem UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase ) # Patch sys.argv UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
38
1
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ): '''simple docstring''' UpperCamelCase : int = size if size is not None else {"height": 18, "width": 18} UpperCamelCase : Tuple = parent UpperCamelCase : List[str] = batch_size UpperCamelCase : List[Any] = num_channels UpperCamelCase : List[str] = image_size UpperCamelCase : Any = min_resolution UpperCamelCase : Tuple = max_resolution UpperCamelCase : Dict = do_resize UpperCamelCase : Optional[int] = size UpperCamelCase : Union[str, Any] = do_normalize UpperCamelCase : Union[str, Any] = image_mean UpperCamelCase : List[str] = image_std def __UpperCamelCase( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = DPTImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = DPTImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) UpperCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase : Union[str, Any] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Union[str, Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
import math def A_ ( _lowerCAmelCase ) -> int: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : List[Any] = F"""Input value of [number={number}] must be an integer""" raise TypeError(_lowerCAmelCase ) if number < 1: UpperCamelCase : Union[str, Any] = F"""Input value of [number={number}] must be > 0""" raise ValueError(_lowerCAmelCase ) elif number == 1: return 3 elif number == 2: return 5 else: UpperCamelCase : List[Any] = int(math.log(number // 3 , 2 ) ) + 2 UpperCamelCase : Optional[Any] = [3, 5] UpperCamelCase : Optional[int] = 2 UpperCamelCase : int = 3 for block in range(1 , _lowerCAmelCase ): for _ in range(_lowerCAmelCase ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): __lowerCamelCase : List[str] = 0 try: __lowerCamelCase : Optional[Any] = proth(number) except ValueError: print(f"""ValueError: there is no {number}th Proth number""") continue print(f"""The {number}th Proth number: {value}""")
38
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18} UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18} UpperCamelCase : Optional[Any] = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : List[Any] = num_channels UpperCamelCase : Union[str, Any] = num_frames UpperCamelCase : Any = image_size UpperCamelCase : Tuple = min_resolution UpperCamelCase : Optional[Any] = max_resolution UpperCamelCase : Any = do_resize UpperCamelCase : Tuple = size UpperCamelCase : List[Any] = do_normalize UpperCamelCase : Optional[int] = image_mean UpperCamelCase : Any = image_std UpperCamelCase : str = crop_size def __UpperCamelCase( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = VivitImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
38
1
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class A__ ( __snake_case ): _UpperCAmelCase :torch.FloatTensor class A__ ( nn.Module ): def __init__( self , A_=3 , A_=3 , A_=("DownEncoderBlock2D",) , A_=(64,) , A_=2 , A_=32 , A_="silu" , A_=True , ): '''simple docstring''' super().__init__() UpperCamelCase : int = layers_per_block UpperCamelCase : List[str] = torch.nn.Convad( A_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) UpperCamelCase : Dict = None UpperCamelCase : List[str] = nn.ModuleList([] ) # down UpperCamelCase : Union[str, Any] = block_out_channels[0] for i, down_block_type in enumerate(A_ ): UpperCamelCase : List[str] = output_channel UpperCamelCase : Optional[int] = block_out_channels[i] UpperCamelCase : Union[str, Any] = i == len(A_ ) - 1 UpperCamelCase : List[str] = get_down_block( A_ , num_layers=self.layers_per_block , in_channels=A_ , out_channels=A_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , ) self.down_blocks.append(A_ ) # mid UpperCamelCase : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , ) # out UpperCamelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=A_ , eps=1e-6 ) UpperCamelCase : Any = nn.SiLU() UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels UpperCamelCase : Union[str, Any] = nn.Convad(block_out_channels[-1] , A_ , 3 , padding=1 ) UpperCamelCase : Optional[int] = False def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : int = x UpperCamelCase : List[Any] = self.conv_in(A_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(A_ ): def custom_forward(*A_ ): return module(*A_ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: UpperCamelCase : int = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) , A_ , use_reentrant=A_ ) # middle UpperCamelCase : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , A_ , use_reentrant=A_ ) else: for down_block in self.down_blocks: UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) , A_ ) # middle UpperCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , A_ ) else: # down for down_block in self.down_blocks: UpperCamelCase : Union[str, Any] = down_block(A_ ) # middle UpperCamelCase : Optional[Any] = self.mid_block(A_ ) # post-process UpperCamelCase : Dict = self.conv_norm_out(A_ ) UpperCamelCase : Union[str, Any] = self.conv_act(A_ ) UpperCamelCase : Optional[Any] = self.conv_out(A_ ) return sample class A__ ( nn.Module ): def __init__( self , A_=3 , A_=3 , A_=("UpDecoderBlock2D",) , A_=(64,) , A_=2 , A_=32 , A_="silu" , A_="group" , ): '''simple docstring''' super().__init__() UpperCamelCase : List[str] = layers_per_block UpperCamelCase : Optional[Any] = nn.Convad( A_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) UpperCamelCase : Dict = None UpperCamelCase : Tuple = nn.ModuleList([] ) UpperCamelCase : Dict = in_channels if norm_type == "spatial" else None # mid UpperCamelCase : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , ) # up UpperCamelCase : Optional[int] = list(reversed(A_ ) ) UpperCamelCase : Union[str, Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(A_ ): UpperCamelCase : Tuple = output_channel UpperCamelCase : Optional[int] = reversed_block_out_channels[i] UpperCamelCase : str = i == len(A_ ) - 1 UpperCamelCase : int = get_up_block( A_ , num_layers=self.layers_per_block + 1 , in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , resnet_time_scale_shift=A_ , ) self.up_blocks.append(A_ ) UpperCamelCase : Any = output_channel # out if norm_type == "spatial": UpperCamelCase : Tuple = SpatialNorm(block_out_channels[0] , A_ ) else: UpperCamelCase : str = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=A_ , eps=1e-6 ) UpperCamelCase : str = nn.SiLU() UpperCamelCase : str = nn.Convad(block_out_channels[0] , A_ , 3 , padding=1 ) UpperCamelCase : Optional[int] = False def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' UpperCamelCase : Tuple = z UpperCamelCase : int = self.conv_in(A_ ) UpperCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(A_ ): def custom_forward(*A_ ): return module(*A_ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle UpperCamelCase : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , A_ , A_ , use_reentrant=A_ ) UpperCamelCase : Union[str, Any] = sample.to(A_ ) # up for up_block in self.up_blocks: UpperCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) , A_ , A_ , use_reentrant=A_ ) else: # middle UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , A_ , A_ ) UpperCamelCase : List[Any] = sample.to(A_ ) # up for up_block in self.up_blocks: UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) , A_ , A_ ) else: # middle UpperCamelCase : str = self.mid_block(A_ , A_ ) UpperCamelCase : List[str] = sample.to(A_ ) # up for up_block in self.up_blocks: UpperCamelCase : Optional[int] = up_block(A_ , A_ ) # post-process if latent_embeds is None: UpperCamelCase : int = self.conv_norm_out(A_ ) else: UpperCamelCase : int = self.conv_norm_out(A_ , A_ ) UpperCamelCase : int = self.conv_act(A_ ) UpperCamelCase : Optional[Any] = self.conv_out(A_ ) return sample class A__ ( nn.Module ): def __init__( self , A_ , A_ , A_ , A_=None , A_="random" , A_=False , A_=True ): '''simple docstring''' super().__init__() UpperCamelCase : List[Any] = n_e UpperCamelCase : Dict = vq_embed_dim UpperCamelCase : Optional[Any] = beta UpperCamelCase : int = legacy UpperCamelCase : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) UpperCamelCase : str = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) UpperCamelCase : str = self.used.shape[0] UpperCamelCase : List[Any] = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": UpperCamelCase : Tuple = self.re_embed UpperCamelCase : Optional[Any] = self.re_embed + 1 print( F"""Remapping {self.n_e} indices to {self.re_embed} indices. """ F"""Using {self.unknown_index} for unknown indices.""" ) else: UpperCamelCase : Optional[Any] = n_e UpperCamelCase : Tuple = sane_index_shape def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = inds.shape assert len(A_ ) > 1 UpperCamelCase : Dict = inds.reshape(ishape[0] , -1 ) UpperCamelCase : Any = self.used.to(A_ ) UpperCamelCase : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long() UpperCamelCase : Optional[Any] = match.argmax(-1 ) UpperCamelCase : Union[str, Any] = match.sum(2 ) < 1 if self.unknown_index == "random": UpperCamelCase : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: UpperCamelCase : List[Any] = self.unknown_index return new.reshape(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Tuple = inds.shape assert len(A_ ) > 1 UpperCamelCase : int = inds.reshape(ishape[0] , -1 ) UpperCamelCase : Dict = self.used.to(A_ ) if self.re_embed > self.used.shape[0]: # extra token UpperCamelCase : List[Any] = 0 # simply set to zero UpperCamelCase : str = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , A_ ) return back.reshape(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = z.permute(0 , 2 , 3 , 1 ).contiguous() UpperCamelCase : Any = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(A_ , self.embedding.weight ) , dim=1 ) UpperCamelCase : Optional[int] = self.embedding(A_ ).view(z.shape ) UpperCamelCase : Dict = None UpperCamelCase : Optional[Any] = None # compute loss for embedding if not self.legacy: UpperCamelCase : List[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: UpperCamelCase : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients UpperCamelCase : Dict = z + (z_q - z).detach() # reshape back to match original input shape UpperCamelCase : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: UpperCamelCase : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis UpperCamelCase : Dict = self.remap_to_used(A_ ) UpperCamelCase : int = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: UpperCamelCase : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if self.remap is not None: UpperCamelCase : Dict = indices.reshape(shape[0] , -1 ) # add batch axis UpperCamelCase : List[str] = self.unmap_to_all(A_ ) UpperCamelCase : List[str] = indices.reshape(-1 ) # flatten again # get quantized latent vectors UpperCamelCase : Union[str, Any] = self.embedding(A_ ) if shape is not None: UpperCamelCase : int = z_q.view(A_ ) # reshape back to match original input shape UpperCamelCase : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class A__ ( __snake_case ): def __init__( self , A_ , A_=False ): '''simple docstring''' UpperCamelCase : Optional[Any] = parameters UpperCamelCase , UpperCamelCase : Union[str, Any] = torch.chunk(A_ , 2 , dim=1 ) UpperCamelCase : List[Any] = torch.clamp(self.logvar , -30.0 , 20.0 ) UpperCamelCase : Dict = deterministic UpperCamelCase : str = torch.exp(0.5 * self.logvar ) UpperCamelCase : Any = torch.exp(self.logvar ) if self.deterministic: UpperCamelCase : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def __UpperCamelCase( self , A_ = None ): '''simple docstring''' UpperCamelCase : Union[str, Any] = randn_tensor( self.mean.shape , generator=A_ , device=self.parameters.device , dtype=self.parameters.dtype ) UpperCamelCase : Optional[int] = self.mean + self.std * sample return x def __UpperCamelCase( self , A_=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def __UpperCamelCase( self , A_ , A_=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) UpperCamelCase : str = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=A_ ) def __UpperCamelCase( self ): '''simple docstring''' return self.mean
38
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __lowerCamelCase : Dict = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __lowerCamelCase : Tuple = { """facebook/blenderbot_small-90M""": 512, } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , ) UpperCamelCase : Union[str, Any] = add_prefix_space def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Tuple = [self.sep_token_id] UpperCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
38
1
from ....configuration_utils import PretrainedConfig from ....utils import logging __lowerCamelCase : Optional[int] = logging.get_logger(__name__) # TODO: upload to AWS __lowerCamelCase : List[str] = { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Any = 'retribert' def __init__( self , A_=3_0522 , A_=768 , A_=8 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=True , A_=128 , A_=0 , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , **A_ ) UpperCamelCase : Tuple = vocab_size UpperCamelCase : List[Any] = hidden_size UpperCamelCase : int = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Tuple = hidden_act UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : Dict = hidden_dropout_prob UpperCamelCase : str = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Tuple = type_vocab_size UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : Dict = layer_norm_eps UpperCamelCase : Union[str, Any] = share_encoders UpperCamelCase : Tuple = projection_dim
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : int = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
38
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : int = logging.get_logger(__name__) __lowerCamelCase : str = { """facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""", """facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class A__ ( __snake_case ): _UpperCAmelCase :int = 'xlm-roberta-xl' def __init__( self , A_=25_0880 , A_=2560 , A_=36 , A_=32 , A_=1_0240 , A_="gelu" , A_=0.1 , A_=0.1 , A_=514 , A_=1 , A_=0.02 , A_=1e-05 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : Dict = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : int = num_hidden_layers UpperCamelCase : List[Any] = num_attention_heads UpperCamelCase : List[Any] = hidden_act UpperCamelCase : List[str] = intermediate_size UpperCamelCase : Tuple = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = type_vocab_size UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Optional[Any] = layer_norm_eps UpperCamelCase : Any = position_embedding_type UpperCamelCase : List[str] = use_cache UpperCamelCase : List[Any] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : List[Any] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
38
import logging import os import threading import time try: import warnings except ImportError: __lowerCamelCase : str = None try: import msvcrt except ImportError: __lowerCamelCase : str = None try: import fcntl except ImportError: __lowerCamelCase : List[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __lowerCamelCase : Union[str, Any] = OSError # Data # ------------------------------------------------ __lowerCamelCase : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] __lowerCamelCase : Union[str, Any] = """3.0.12""" __lowerCamelCase : Any = None def A_ ( ) -> List[Any]: global _logger UpperCamelCase : Any = _logger or logging.getLogger(__name__ ) return _logger class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = lock_file return None def __str__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.lock.release() return None class A__ : def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ ) # The path to the lock file. UpperCamelCase : List[Any] = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase : Tuple = None # The default timeout value. UpperCamelCase : Optional[Any] = timeout # We use this lock primarily for the lock counter. UpperCamelCase : Union[str, Any] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase : Dict = 0 return None @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file @property def __UpperCamelCase( self ): '''simple docstring''' return self._timeout @timeout.setter def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = float(A_ ) return None def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file_fd is not None def __UpperCamelCase( self , A_=None , A_=0.05 ): '''simple docstring''' if timeout is None: UpperCamelCase : Optional[Any] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 UpperCamelCase : Dict = id(self ) UpperCamelCase : List[str] = self._lock_file UpperCamelCase : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(A_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase( self , A_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase : List[Any] = id(self ) UpperCamelCase : Dict = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() UpperCamelCase : Dict = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=A_ ) return None def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = os.path.basename(A_ ) if len(A_ ) > max_length and max_length > 0: UpperCamelCase : Optional[int] = os.path.dirname(A_ ) UpperCamelCase : int = str(hash(A_ ) ) UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(A_ , A_ ) else: return path class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase : str = os.open(self._lock_file , A_ ) except OSError: pass else: try: msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(A_ ) else: UpperCamelCase : Optional[Any] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self._lock_file_fd UpperCamelCase : str = None msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 ) os.close(A_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. except OSError: pass return None class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase : int = os.open(self._lock_file , A_ ) try: fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(A_ ) else: UpperCamelCase : List[str] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self._lock_file_fd UpperCamelCase : List[Any] = None fcntl.flock(A_ , fcntl.LOCK_UN ) os.close(A_ ) return None class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase : Optional[int] = os.open(self._lock_file , A_ ) except OSError: pass else: UpperCamelCase : Tuple = fd return None def __UpperCamelCase( self ): '''simple docstring''' os.close(self._lock_file_fd ) UpperCamelCase : str = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __lowerCamelCase : Dict = None if msvcrt: __lowerCamelCase : Any = WindowsFileLock elif fcntl: __lowerCamelCase : Any = UnixFileLock else: __lowerCamelCase : int = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
38
1
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> bool: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : List[str] = len(_lowerCAmelCase ) UpperCamelCase : Any = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] UpperCamelCase : Union[str, Any] = True for i in range(_lowerCAmelCase ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: UpperCamelCase : List[str] = True if a[i].islower(): UpperCamelCase : Union[str, Any] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
38
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> str: if config_name_or_path is None: UpperCamelCase : Dict = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: UpperCamelCase : Tuple = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: UpperCamelCase : Tuple = question_encoder_name_or_path UpperCamelCase : Any = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. UpperCamelCase : Optional[Any] = RagConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : Tuple = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCamelCase : int = gen_config UpperCamelCase : Dict = question_encoder_config UpperCamelCase : Tuple = model_class.from_pretrained_question_encoder_generator( _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase ) rag_model.save_pretrained(_lowerCAmelCase ) # Sanity check. model_class.from_pretrained(_lowerCAmelCase ) # Save tokenizers. UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) __lowerCamelCase : Dict = parser.parse_args() __lowerCamelCase : Dict = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
38
1
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :List[Any] = IFImgaImgSuperResolutionPipeline _UpperCAmelCase :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'} _UpperCAmelCase :Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} ) _UpperCAmelCase :Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'} def __UpperCamelCase( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' if str(A_ ).startswith("mps" ): UpperCamelCase : List[str] = torch.manual_seed(A_ ) else: UpperCamelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Tuple = floats_tensor((1, 3, 16, 16) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __UpperCamelCase( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def __UpperCamelCase( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __UpperCamelCase( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def __UpperCamelCase( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __UpperCamelCase( self ): '''simple docstring''' self._test_save_load_local() def __UpperCamelCase( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
38
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : Dict = parent UpperCamelCase : str = 13 UpperCamelCase : int = 7 UpperCamelCase : str = True UpperCamelCase : Dict = True UpperCamelCase : str = True UpperCamelCase : Tuple = True UpperCamelCase : List[str] = 99 UpperCamelCase : Optional[Any] = 384 UpperCamelCase : Tuple = 2 UpperCamelCase : Union[str, Any] = 4 UpperCamelCase : Dict = 37 UpperCamelCase : Any = "gelu" UpperCamelCase : List[Any] = 0.1 UpperCamelCase : int = 0.1 UpperCamelCase : Tuple = 512 UpperCamelCase : List[Any] = 16 UpperCamelCase : int = 2 UpperCamelCase : Dict = 0.02 UpperCamelCase : Optional[Any] = 3 UpperCamelCase : List[Any] = 4 UpperCamelCase : Dict = 128 UpperCamelCase : Optional[Any] = 2 UpperCamelCase : Optional[int] = 9 UpperCamelCase : Optional[int] = 1 UpperCamelCase : Union[str, Any] = None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : str = None if self.use_input_mask: UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None if self.use_token_type_ids: UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Optional[int] = None UpperCamelCase : Optional[int] = None UpperCamelCase : List[Any] = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Any = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel(config=A_ ) UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Optional[int] = [input_ids, input_mask] UpperCamelCase : Any = model(A_ ) UpperCamelCase : int = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ ) UpperCamelCase : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = self.num_choices UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ ) UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.num_labels UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ ) UpperCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ ) UpperCamelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Dict = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase :Any = False _UpperCAmelCase :int = False _UpperCAmelCase :str = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = TFConvBertModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Optional[Any] = True UpperCamelCase : Any = True if hasattr(A_ , "use_cache" ): UpperCamelCase : List[str] = True UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ ) for model_class in self.all_model_classes: UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ ) UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Optional[int] = len(model(A_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_ ) UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" ) UpperCamelCase : Dict = tf.keras.models.load_model(A_ ) UpperCamelCase : str = model(A_ ) if self.is_encoder_decoder: UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"] UpperCamelCase : Any = outputs["encoder_attentions"] else: UpperCamelCase : Any = outputs["hidden_states"] UpperCamelCase : List[str] = outputs["attentions"] self.assertEqual(len(A_ ) , A_ ) UpperCamelCase : int = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A_ ) , A_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : Dict = True UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ ) UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ ) def check_decoder_attentions_output(A_ ): UpperCamelCase : Optional[Any] = len(A_ ) self.assertEqual(out_len % 2 , 0 ) UpperCamelCase : Any = outputs.decoder_attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(A_ ): UpperCamelCase : Dict = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True UpperCamelCase : List[Any] = False UpperCamelCase : Dict = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) UpperCamelCase : List[str] = len(A_ ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) if self.is_encoder_decoder: UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_decoder_attentions_output(A_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCamelCase : Tuple = True UpperCamelCase : int = model_class(A_ ) UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) # Check attention is always last and order is fine UpperCamelCase : Optional[int] = True UpperCamelCase : List[str] = True UpperCamelCase : Optional[int] = model_class(A_ ) UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) ) self.assertEqual(model.config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) @require_tf class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase : List[str] = model(A_ )[0] UpperCamelCase : int = [1, 6, 768] self.assertEqual(output.shape , A_ ) UpperCamelCase : List[str] = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
38
1
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : Any = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " F"""{test_file} instead.""" ) UpperCamelCase : int = components[-1] if not test_fn.endswith("py" ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith("test_modeling_" ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) UpperCamelCase : List[str] = components[:-1] + [test_fn.replace(".py" , "" )] UpperCamelCase : Optional[int] = ".".join(_lowerCAmelCase ) return test_module_path def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : Optional[Any] = get_module_path(_lowerCAmelCase ) UpperCamelCase : Any = importlib.import_module(_lowerCAmelCase ) return test_module def A_ ( _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = get_test_module(_lowerCAmelCase ) for attr in dir(_lowerCAmelCase ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(_lowerCAmelCase , _lowerCAmelCase ) ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def A_ ( _lowerCAmelCase ) -> Optional[int]: UpperCamelCase : Dict = [] UpperCamelCase : Union[str, Any] = get_test_module(_lowerCAmelCase ) for attr in dir(_lowerCAmelCase ): UpperCamelCase : Any = getattr(_lowerCAmelCase , _lowerCAmelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). UpperCamelCase : List[Any] = getattr(_lowerCAmelCase , "all_model_classes" , [] ) if len(_lowerCAmelCase ) > 0: test_classes.append(_lowerCAmelCase ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def A_ ( _lowerCAmelCase ) -> Any: UpperCamelCase : Tuple = get_test_classes(_lowerCAmelCase ) UpperCamelCase : Dict = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def A_ ( _lowerCAmelCase ) -> Optional[int]: UpperCamelCase : Dict = test_class() if hasattr(_lowerCAmelCase , "setUp" ): test.setUp() UpperCamelCase : List[str] = None if hasattr(_lowerCAmelCase , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: UpperCamelCase : List[Any] = test.model_tester.__class__ return model_tester def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict: UpperCamelCase : int = get_test_classes(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_lowerCAmelCase ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: UpperCamelCase : str = get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : str = [] for test_class in test_classes: UpperCamelCase : List[str] = get_model_tester_from_test_class(_lowerCAmelCase ) if tester_class is not None: tester_classes.append(_lowerCAmelCase ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def A_ ( _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : str = get_test_classes(_lowerCAmelCase ) UpperCamelCase : str = {test_class: get_model_tester_from_test_class(_lowerCAmelCase ) for test_class in test_classes} return test_tester_mapping def A_ ( _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : str = get_model_classes(_lowerCAmelCase ) UpperCamelCase : Tuple = { model_class: get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes } return model_test_mapping def A_ ( _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = get_model_classes(_lowerCAmelCase ) UpperCamelCase : Tuple = { model_class: get_tester_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes } return model_to_tester_mapping def A_ ( _lowerCAmelCase ) -> str: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): return o elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): return o.__name__ elif isinstance(_lowerCAmelCase , (list, tuple) ): return [to_json(_lowerCAmelCase ) for x in o] elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): return {to_json(_lowerCAmelCase ): to_json(_lowerCAmelCase ) for k, v in o.items()} else: return o
38
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : str = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'camembert' def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : List[str] = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : str = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = use_cache UpperCamelCase : List[str] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
38
1
from __future__ import annotations class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Tuple = order # a_{0} ... a_{k} UpperCamelCase : Dict = [1.0] + [0.0] * order # b_{0} ... b_{k} UpperCamelCase : List[str] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] UpperCamelCase : List[str] = [0.0] * self.order # y[n-1] ... y[n-k] UpperCamelCase : Tuple = [0.0] * self.order def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' if len(A_ ) < self.order: UpperCamelCase : Union[str, Any] = [1.0, *a_coeffs] if len(A_ ) != self.order + 1: UpperCamelCase : Optional[int] = ( F"""Expected a_coeffs to have {self.order + 1} elements """ F"""for {self.order}-order filter, got {len(A_ )}""" ) raise ValueError(A_ ) if len(A_ ) != self.order + 1: UpperCamelCase : List[str] = ( F"""Expected b_coeffs to have {self.order + 1} elements """ F"""for {self.order}-order filter, got {len(A_ )}""" ) raise ValueError(A_ ) UpperCamelCase : Dict = a_coeffs UpperCamelCase : Union[str, Any] = b_coeffs def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : int = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) UpperCamelCase : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] UpperCamelCase : Optional[Any] = self.input_history[:-1] UpperCamelCase : Dict = self.output_history[:-1] UpperCamelCase : List[Any] = sample UpperCamelCase : Any = result return result
38
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return int(input_a == input_a == 0 ) def A_ ( ) -> None: print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCamelCase : Optional[int] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class A__ ( __snake_case , __snake_case ): _UpperCAmelCase :int = 'convnextv2' def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : List[str] = num_channels UpperCamelCase : Tuple = patch_size UpperCamelCase : Union[str, Any] = num_stages UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes UpperCamelCase : Optional[Any] = [3, 3, 9, 3] if depths is None else depths UpperCamelCase : Optional[int] = hidden_act UpperCamelCase : Dict = initializer_range UpperCamelCase : Optional[Any] = layer_norm_eps UpperCamelCase : str = drop_path_rate UpperCamelCase : Optional[int] = image_size UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase , UpperCamelCase : Union[str, Any] = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
38
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer'] _UpperCAmelCase :Tuple = 'BlipImageProcessor' _UpperCAmelCase :Optional[int] = 'AutoTokenizer' def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = False super().__init__(A_ , A_ ) UpperCamelCase : str = self.image_processor def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ): '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: UpperCamelCase : int = self.tokenizer UpperCamelCase : Optional[int] = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) return text_encoding # add pixel_values UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ ) if text is not None: UpperCamelCase : Dict = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) else: UpperCamelCase : Dict = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.batch_decode(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.tokenizer.decode(*A_ , **A_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.tokenizer.model_input_names UpperCamelCase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
38
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") __lowerCamelCase : Optional[int] = logging.getLogger(__name__) @dataclass class A__ : _UpperCAmelCase :Optional[str] = field( default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) _UpperCAmelCase :Optional[str] = field( default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , ) _UpperCAmelCase :int = field( default=1_0_2_4 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _UpperCAmelCase :bool = field( default=__snake_case , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) _UpperCAmelCase :bool = field( default=__snake_case , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) _UpperCAmelCase :Optional[int] = field( default=__snake_case , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _UpperCAmelCase :Optional[int] = field( default=__snake_case , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) _UpperCAmelCase :Optional[int] = field( default=__snake_case , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'A csv or a json file containing the training data.'} ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'A csv or a json file containing the validation data.'} ) _UpperCAmelCase :Optional[str] = field(default=__snake_case , metadata={'help': 'A csv or a json file containing the test data.'} ) def __UpperCamelCase( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: UpperCamelCase : Tuple = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." UpperCamelCase : Optional[Any] = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class A__ : _UpperCAmelCase :str = field( default=__snake_case , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _UpperCAmelCase :bool = field( default=__snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) _UpperCAmelCase :str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _UpperCAmelCase :bool = field( default=__snake_case , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) def A_ ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCamelCase : List[Any] = training_args.get_process_log_level() logger.setLevel(_lowerCAmelCase ) datasets.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. UpperCamelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCamelCase : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. UpperCamelCase : Tuple = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: UpperCamelCase : str = data_args.train_file.split("." )[-1] UpperCamelCase : Union[str, Any] = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." UpperCamelCase : Dict = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(F"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files UpperCamelCase : Optional[Any] = load_dataset("csv" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files UpperCamelCase : str = load_dataset("json" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels UpperCamelCase : Optional[Any] = raw_datasets["train"].features["label"].names UpperCamelCase : Optional[int] = len(_lowerCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer UpperCamelCase : Any = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowerCAmelCase , ) UpperCamelCase : str = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: UpperCamelCase : str = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch UpperCamelCase : Tuple = False # Some models have set the order of the labels to use, so let's make sure we do use it. UpperCamelCase : Optional[int] = {"Refused": 0, "Entailed": 1} UpperCamelCase : Tuple = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) UpperCamelCase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_lowerCAmelCase ): # Tokenize the texts def _convert_table_text_to_pandas(_lowerCAmelCase ): UpperCamelCase : Any = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] UpperCamelCase : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd UpperCamelCase : str = examples["statement"] UpperCamelCase : Dict = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) UpperCamelCase : str = tokenizer(_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ) UpperCamelCase : Any = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): UpperCamelCase : List[Any] = raw_datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) UpperCamelCase : int = raw_datasets["train"] if data_args.max_train_samples is not None: UpperCamelCase : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) UpperCamelCase : Optional[Any] = raw_datasets["validation"] if data_args.max_eval_samples is not None: UpperCamelCase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) UpperCamelCase : Tuple = raw_datasets["test"] if data_args.max_predict_samples is not None: UpperCamelCase : List[Any] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_lowerCAmelCase ) ) , 3 ): logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_lowerCAmelCase ): UpperCamelCase : Tuple = p.predictions[0] if isinstance(p.predictions , _lowerCAmelCase ) else p.predictions UpperCamelCase : Union[str, Any] = np.argmax(_lowerCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: UpperCamelCase : Tuple = default_data_collator elif training_args.fpaa: UpperCamelCase : Optional[int] = DataCollatorWithPadding(_lowerCAmelCase , pad_to_multiple_of=8 ) else: UpperCamelCase : Tuple = None # Initialize our Trainer UpperCamelCase : str = Trainer( model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , ) # Training if training_args.do_train: UpperCamelCase : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: UpperCamelCase : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase : Optional[int] = last_checkpoint UpperCamelCase : List[Any] = trainer.train(resume_from_checkpoint=_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = train_result.metrics UpperCamelCase : Any = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase ) ) UpperCamelCase : int = min(_lowerCAmelCase , len(_lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , _lowerCAmelCase ) trainer.save_metrics("train" , _lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCamelCase : Union[str, Any] = trainer.evaluate(eval_dataset=_lowerCAmelCase ) UpperCamelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase ) UpperCamelCase : Any = min(_lowerCAmelCase , len(_lowerCAmelCase ) ) trainer.log_metrics("eval" , _lowerCAmelCase ) trainer.save_metrics("eval" , _lowerCAmelCase ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. UpperCamelCase : Optional[int] = predict_dataset.remove_columns("label" ) UpperCamelCase : Tuple = trainer.predict(_lowerCAmelCase , metric_key_prefix="predict" ).predictions UpperCamelCase : Union[str, Any] = np.argmax(_lowerCAmelCase , axis=1 ) UpperCamelCase : Tuple = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(_lowerCAmelCase ): UpperCamelCase : Tuple = label_list[item] writer.write(F"""{index}\t{item}\n""" ) UpperCamelCase : Dict = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**_lowerCAmelCase ) else: trainer.create_model_card(**_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
38
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCamelCase : Dict = logging.get_logger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask'] def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ): '''simple docstring''' super().__init__( feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , ) UpperCamelCase : Optional[int] = spectrogram_length UpperCamelCase : Dict = num_channels UpperCamelCase : Optional[Any] = patch_size UpperCamelCase : str = feature_size // self.patch_size[1] UpperCamelCase : List[str] = n_fft UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate UpperCamelCase : Optional[int] = sampling_rate UpperCamelCase : int = padding_value UpperCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = spectrogram( A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , ) UpperCamelCase : List[Any] = log_spec[:, :-1] UpperCamelCase : Optional[int] = log_spec - 20.0 UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) UpperCamelCase : Union[str, Any] = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase : List[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCamelCase : str = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A_ ): UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCamelCase : List[str] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCamelCase : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa ) # convert into correct format for padding UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCamelCase : List[str] = padded_audio_features * self.padding_value for i in range(len(A_ ) ): UpperCamelCase : Union[str, Any] = audio_features[i] UpperCamelCase : Optional[int] = feature # return as BatchFeature if return_attention_mask: UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: UpperCamelCase : int = {"audio_values": padded_audio_features} UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ ) return encoded_inputs
38
1
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput __lowerCamelCase : List[Any] = 8 def A_ ( _lowerCAmelCase , _lowerCAmelCase=BITS ) -> Optional[Any]: UpperCamelCase : Any = x.device UpperCamelCase : List[str] = (x * 255).int().clamp(0 , 255 ) UpperCamelCase : int = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = rearrange(_lowerCAmelCase , "d -> d 1 1" ) UpperCamelCase : Any = rearrange(_lowerCAmelCase , "b c h w -> b c 1 h w" ) UpperCamelCase : Any = ((x & mask) != 0).float() UpperCamelCase : Union[str, Any] = rearrange(_lowerCAmelCase , "b c d h w -> b (c d) h w" ) UpperCamelCase : Optional[int] = bits * 2 - 1 return bits def A_ ( _lowerCAmelCase , _lowerCAmelCase=BITS ) -> int: UpperCamelCase : Dict = x.device UpperCamelCase : str = (x > 0).int() UpperCamelCase : Optional[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase , dtype=torch.intaa ) UpperCamelCase : Union[str, Any] = rearrange(_lowerCAmelCase , "d -> d 1 1" ) UpperCamelCase : Tuple = rearrange(_lowerCAmelCase , "b (c d) h w -> b c d h w" , d=8 ) UpperCamelCase : List[Any] = reduce(x * mask , "b c d h w -> b c h w" , "sum" ) return (dec / 255).clamp(0.0 , 1.0 ) def A_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = True , _lowerCAmelCase=None , _lowerCAmelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) UpperCamelCase : List[Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas UpperCamelCase : List[str] = self.alphas_cumprod[timestep] UpperCamelCase : str = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod UpperCamelCase : Optional[Any] = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" UpperCamelCase : List[str] = self.bit_scale if self.config.clip_sample: UpperCamelCase : Any = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) UpperCamelCase : int = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Union[str, Any] = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide UpperCamelCase : Tuple = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase : Union[str, Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 UpperCamelCase : Any = model_output.device if torch.is_tensor(_lowerCAmelCase ) else "cpu" UpperCamelCase : Optional[Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) UpperCamelCase : List[str] = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) ** 0.5 * eta * noise UpperCamelCase : str = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def A_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="epsilon" , _lowerCAmelCase=None , _lowerCAmelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]: UpperCamelCase : Tuple = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: UpperCamelCase , UpperCamelCase : List[Any] = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 ) else: UpperCamelCase : Any = None # 1. compute alphas, betas UpperCamelCase : Union[str, Any] = self.alphas_cumprod[t] UpperCamelCase : Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one UpperCamelCase : List[Any] = 1 - alpha_prod_t UpperCamelCase : Any = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": UpperCamelCase : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": UpperCamelCase : Optional[Any] = model_output else: raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" UpperCamelCase : Any = self.bit_scale if self.config.clip_sample: UpperCamelCase : int = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase : Any = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t UpperCamelCase : List[Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase : Optional[Any] = 0 if t > 0: UpperCamelCase : Any = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCAmelCase ).to(model_output.device ) UpperCamelCase : str = (self._get_variance(_lowerCAmelCase , predicted_variance=_lowerCAmelCase ) ** 0.5) * noise UpperCamelCase : str = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) class A__ ( __snake_case ): def __init__( self , A_ , A_ , A_ = 1.0 , ): '''simple docstring''' super().__init__() UpperCamelCase : Optional[int] = bit_scale UpperCamelCase : int = ( ddim_bit_scheduler_step if isinstance(A_ , A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self , A_ = 256 , A_ = 256 , A_ = 50 , A_ = None , A_ = 1 , A_ = "pil" , A_ = True , **A_ , ): '''simple docstring''' UpperCamelCase : str = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=A_ , ) UpperCamelCase : str = decimal_to_bits(A_ ) * self.bit_scale UpperCamelCase : List[str] = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual UpperCamelCase : int = self.unet(A_ , A_ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase : Tuple = self.scheduler.step(A_ , A_ , A_ ).prev_sample UpperCamelCase : Any = bits_to_decimal(A_ ) if output_type == "pil": UpperCamelCase : Any = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
38
from __future__ import annotations from random import random from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar("""KT""") __lowerCamelCase : Dict = TypeVar("""VT""") class A__ ( Generic[KT, VT] ): def __init__( self , A_ = "root" , A_ = None ): '''simple docstring''' UpperCamelCase : int = key UpperCamelCase : List[Any] = value UpperCamelCase : list[Node[KT, VT]] = [] def __repr__( self ): '''simple docstring''' return F"""Node({self.key}: {self.value})""" @property def __UpperCamelCase( self ): '''simple docstring''' return len(self.forward ) class A__ ( Generic[KT, VT] ): def __init__( self , A_ = 0.5 , A_ = 16 ): '''simple docstring''' UpperCamelCase : Node[KT, VT] = Node[KT, VT]() UpperCamelCase : List[Any] = 0 UpperCamelCase : Union[str, Any] = p UpperCamelCase : List[str] = max_level def __str__( self ): '''simple docstring''' UpperCamelCase : int = list(self ) if len(A_ ) == 0: return F"""SkipList(level={self.level})""" UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 ) UpperCamelCase : Dict = max(A_ , 4 ) + 4 UpperCamelCase : str = self.head UpperCamelCase : List[Any] = [] UpperCamelCase : int = node.forward.copy() lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) ) lines.append(" " * label_size + "| " * len(A_ ) ) while len(node.forward ) != 0: UpperCamelCase : Union[str, Any] = node.forward[0] lines.append( F"""[{node.key}]""".ljust(A_ , "-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(A_ ) ) UpperCamelCase : Tuple = node.forward lines.append("None".ljust(A_ ) + "* " * len(A_ ) ) return F"""SkipList(level={self.level})\n""" + "\n".join(A_ ) def __iter__( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.head while len(node.forward ) != 0: yield node.forward[0].key UpperCamelCase : Union[str, Any] = node.forward[0] def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = 1 while random() < self.p and level < self.max_level: level += 1 return level def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = [] UpperCamelCase : List[Any] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: UpperCamelCase : str = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(A_ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ ) if node is not None: for i, update_node in enumerate(A_ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: UpperCamelCase : Tuple = node.forward[i] else: UpperCamelCase : List[Any] = update_node.forward[:i] def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ ) if node is not None: UpperCamelCase : Union[str, Any] = value else: UpperCamelCase : Dict = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , A_ ): update_vector.append(self.head ) UpperCamelCase : Optional[int] = level UpperCamelCase : Dict = Node(A_ , A_ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(A_ ) else: UpperCamelCase : List[Any] = new_node def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ ) if node is not None: return node.value return None def A_ ( ) -> List[Any]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 12 ) skip_list.insert("Key3" , 41 ) skip_list.insert("Key4" , -19 ) UpperCamelCase : Optional[int] = skip_list.head UpperCamelCase : List[str] = {} while node.level != 0: UpperCamelCase : str = node.forward[0] UpperCamelCase : Optional[int] = node.value assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def A_ ( ) -> List[Any]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key1" , 10 ) skip_list.insert("Key1" , 12 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 10 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) skip_list.insert("Key10" , 10 ) UpperCamelCase : Dict = skip_list.head UpperCamelCase : Tuple = {} while node.level != 0: UpperCamelCase : List[str] = node.forward[0] UpperCamelCase : Dict = node.value if len(_lowerCAmelCase ) != 4: print() assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() assert skip_list.find("Some key" ) is None def A_ ( ) -> Tuple: UpperCamelCase : Optional[int] = SkipList() skip_list.insert("Key2" , 20 ) assert skip_list.find("Key2" ) == 20 skip_list.insert("Some Key" , 10 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 13 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 10 assert skip_list.find("V" ) == 13 def A_ ( ) -> Dict: UpperCamelCase : Optional[int] = SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def A_ ( ) -> Dict: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[str]: UpperCamelCase : int = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 14 assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("Key1" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 15 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def A_ ( ) -> List[Any]: UpperCamelCase : List[Any] = SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 142 ) skip_list.insert("Key2" , 15 ) skip_list.delete("X" ) def traverse_keys(_lowerCAmelCase ): yield node.key for forward_node in node.forward: yield from traverse_keys(_lowerCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def A_ ( ) -> Union[str, Any]: def is_sorted(_lowerCAmelCase ): return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) ) UpperCamelCase : int = SkipList() for i in range(10 ): skip_list.insert(_lowerCAmelCase , _lowerCAmelCase ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_lowerCAmelCase ) ) def A_ ( ) -> Tuple: for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
38
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :str = CycleDiffusionPipeline _UpperCAmelCase :Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } _UpperCAmelCase :List[Any] = PipelineTesterMixin.required_optional_params - {'latents'} _UpperCAmelCase :int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} ) _UpperCAmelCase :str = IMAGE_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) UpperCamelCase : Dict = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCamelCase : Dict = CLIPTextModel(A_ ) UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCamelCase : Dict = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Dict = image / 2 + 0.5 if str(A_ ).startswith("mps" ): UpperCamelCase : Dict = torch.manual_seed(A_ ) else: UpperCamelCase : List[str] = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Tuple = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase : Optional[int] = self.get_dummy_components() UpperCamelCase : List[Any] = CycleDiffusionPipeline(**A_ ) UpperCamelCase : int = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[str] = self.get_dummy_inputs(A_ ) UpperCamelCase : List[str] = pipe(**A_ ) UpperCamelCase : Optional[Any] = output.images UpperCamelCase : Any = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) UpperCamelCase : Union[str, Any] = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.get_dummy_components() for name, module in components.items(): if hasattr(A_ , "half" ): UpperCamelCase : List[Any] = module.half() UpperCamelCase : int = CycleDiffusionPipeline(**A_ ) UpperCamelCase : Optional[Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Tuple = self.get_dummy_inputs(A_ ) UpperCamelCase : str = pipe(**A_ ) UpperCamelCase : Dict = output.images UpperCamelCase : Dict = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) UpperCamelCase : List[Any] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __UpperCamelCase( self ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def __UpperCamelCase( self ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def __UpperCamelCase( self ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def __UpperCamelCase( self ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def __UpperCamelCase( self ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) UpperCamelCase : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) UpperCamelCase : Union[str, Any] = init_image.resize((512, 512) ) UpperCamelCase : str = "CompVis/stable-diffusion-v1-4" UpperCamelCase : Union[str, Any] = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" ) UpperCamelCase : Dict = CycleDiffusionPipeline.from_pretrained( A_ , scheduler=A_ , safety_checker=A_ , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() UpperCamelCase : List[str] = "A black colored car" UpperCamelCase : str = "A blue colored car" UpperCamelCase : Optional[Any] = torch.manual_seed(0 ) UpperCamelCase : Any = pipe( prompt=A_ , source_prompt=A_ , image=A_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=A_ , output_type="np" , ) UpperCamelCase : Dict = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) UpperCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) UpperCamelCase : Union[str, Any] = init_image.resize((512, 512) ) UpperCamelCase : str = "CompVis/stable-diffusion-v1-4" UpperCamelCase : List[Any] = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" ) UpperCamelCase : Optional[int] = CycleDiffusionPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() UpperCamelCase : List[Any] = "A black colored car" UpperCamelCase : Tuple = "A blue colored car" UpperCamelCase : List[str] = torch.manual_seed(0 ) UpperCamelCase : List[str] = pipe( prompt=A_ , source_prompt=A_ , image=A_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=A_ , output_type="np" , ) UpperCamelCase : Union[str, Any] = output.images assert np.abs(image - expected_image ).max() < 2e-2
38
from PIL import Image def A_ ( _lowerCAmelCase ) -> Image: UpperCamelCase , UpperCamelCase : List[Any] = image.size UpperCamelCase : Union[str, Any] = 0 UpperCamelCase : List[str] = image.load() for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): UpperCamelCase : List[Any] = pixels[j, i] mean += pixel mean //= width * height for j in range(_lowerCAmelCase ): for i in range(_lowerCAmelCase ): UpperCamelCase : Union[str, Any] = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L""")) image.save("""output_image_path""")
38
1
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def A_ ( _lowerCAmelCase , _lowerCAmelCase=0.999 , _lowerCAmelCase="cosine" , ) -> List[Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCAmelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCAmelCase ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase : str = [] for i in range(_lowerCAmelCase ): UpperCamelCase : Optional[Any] = i / num_diffusion_timesteps UpperCamelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCAmelCase ) / alpha_bar_fn(_lowerCAmelCase ) , _lowerCAmelCase ) ) return torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) class A__ ( __snake_case , __snake_case ): _UpperCAmelCase :List[str] = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase :List[Any] = 2 @register_to_config def __init__( self , A_ = 1000 , A_ = 0.0_00_85 , A_ = 0.0_12 , A_ = "linear" , A_ = None , A_ = "epsilon" , A_ = False , A_ = False , A_ = 1.0 , A_ = "linspace" , A_ = 0 , ): '''simple docstring''' if trained_betas is not None: UpperCamelCase : Union[str, Any] = torch.tensor(A_ , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCamelCase : List[Any] = torch.linspace(A_ , A_ , A_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCamelCase : Optional[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , A_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCamelCase : Tuple = betas_for_alpha_bar(A_ , alpha_transform_type="cosine" ) elif beta_schedule == "exp": UpperCamelCase : Union[str, Any] = betas_for_alpha_bar(A_ , alpha_transform_type="exp" ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCamelCase : int = 1.0 - self.betas UpperCamelCase : List[str] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(A_ , A_ , A_ ) UpperCamelCase : Union[str, Any] = use_karras_sigmas def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' if schedule_timesteps is None: UpperCamelCase : Any = self.timesteps UpperCamelCase : Dict = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: UpperCamelCase : Dict = 1 if len(A_ ) > 1 else 0 else: UpperCamelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep UpperCamelCase : Any = self._index_counter[timestep_int] return indices[pos].item() @property def __UpperCamelCase( self ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __UpperCamelCase( self , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Optional[int] = self.index_for_timestep(A_ ) UpperCamelCase : Optional[Any] = self.sigmas[step_index] UpperCamelCase : Any = sample / ((sigma**2 + 1) ** 0.5) return sample def __UpperCamelCase( self , A_ , A_ = None , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[int] = num_inference_steps UpperCamelCase : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCamelCase : int = np.linspace(0 , num_train_timesteps - 1 , A_ , dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCamelCase : List[str] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCamelCase : List[Any] = (np.arange(0 , A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCamelCase : str = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCamelCase : Dict = (np.arange(A_ , 0 , -step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCamelCase : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCamelCase : Dict = np.log(A_ ) UpperCamelCase : Optional[Any] = np.interp(A_ , np.arange(0 , len(A_ ) ) , A_ ) if self.config.use_karras_sigmas: UpperCamelCase : Tuple = self._convert_to_karras(in_sigmas=A_ , num_inference_steps=self.num_inference_steps ) UpperCamelCase : Dict = np.array([self._sigma_to_t(A_ , A_ ) for sigma in sigmas] ) UpperCamelCase : Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCamelCase : List[Any] = torch.from_numpy(A_ ).to(device=A_ ) UpperCamelCase : List[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCamelCase : Optional[Any] = torch.from_numpy(A_ ) UpperCamelCase : Optional[Any] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith("mps" ): # mps does not support float64 UpperCamelCase : str = timesteps.to(A_ , dtype=torch.floataa ) else: UpperCamelCase : Any = timesteps.to(device=A_ ) # empty dt and derivative UpperCamelCase : Any = None UpperCamelCase : Tuple = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCamelCase : str = defaultdict(A_ ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = np.log(A_ ) # get distribution UpperCamelCase : Optional[int] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCamelCase : List[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCamelCase : Any = low_idx + 1 UpperCamelCase : int = log_sigmas[low_idx] UpperCamelCase : int = log_sigmas[high_idx] # interpolate sigmas UpperCamelCase : Tuple = (low - log_sigma) / (low - high) UpperCamelCase : Dict = np.clip(A_ , 0 , 1 ) # transform interpolation to time range UpperCamelCase : List[Any] = (1 - w) * low_idx + w * high_idx UpperCamelCase : List[Any] = t.reshape(sigma.shape ) return t def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : float = in_sigmas[-1].item() UpperCamelCase : float = in_sigmas[0].item() UpperCamelCase : Dict = 7.0 # 7.0 is the value used in the paper UpperCamelCase : List[Any] = np.linspace(0 , 1 , A_ ) UpperCamelCase : Union[str, Any] = sigma_min ** (1 / rho) UpperCamelCase : Dict = sigma_max ** (1 / rho) UpperCamelCase : Dict = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def __UpperCamelCase( self ): '''simple docstring''' return self.dt is None def __UpperCamelCase( self , A_ , A_ , A_ , A_ = True , ): '''simple docstring''' UpperCamelCase : Optional[int] = self.index_for_timestep(A_ ) # advance index counter by 1 UpperCamelCase : int = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCamelCase : List[str] = self.sigmas[step_index] UpperCamelCase : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCamelCase : int = self.sigmas[step_index - 1] UpperCamelCase : Optional[int] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCamelCase : List[Any] = 0 UpperCamelCase : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCamelCase : Optional[int] = sigma_hat if self.state_in_first_order else sigma_next UpperCamelCase : str = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCamelCase : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next UpperCamelCase : str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCamelCase : Dict = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCamelCase : Optional[Any] = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCamelCase : Optional[Any] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCamelCase : Optional[int] = sigma_next - sigma_hat # store for 2nd order step UpperCamelCase : Any = derivative UpperCamelCase : Any = dt UpperCamelCase : int = sample else: # 2. 2nd order / Heun's method UpperCamelCase : Dict = (sample - pred_original_sample) / sigma_next UpperCamelCase : Union[str, Any] = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCamelCase : Dict = self.dt UpperCamelCase : Tuple = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCamelCase : Any = None UpperCamelCase : List[Any] = None UpperCamelCase : Any = None UpperCamelCase : List[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def __UpperCamelCase( self , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 UpperCamelCase : Union[str, Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCamelCase : Dict = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCamelCase : Any = self.timesteps.to(original_samples.device ) UpperCamelCase : Union[str, Any] = timesteps.to(original_samples.device ) UpperCamelCase : List[str] = [self.index_for_timestep(A_ , A_ ) for t in timesteps] UpperCamelCase : List[str] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCamelCase : str = sigma.unsqueeze(-1 ) UpperCamelCase : List[Any] = original_samples + noise * sigma return noisy_samples def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
38
from math import loga def A_ ( _lowerCAmelCase ) -> int: if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise TypeError("Input value must be a 'int' type" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
38
1