code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy snake_case : Tuple = logging.getLogger(__name__) snake_case : List[Any] = """pytorch_model.bin""" @dataclasses.dataclass class UpperCamelCase__ : """simple docstring""" __UpperCAmelCase = dataclasses.field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""}) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , ) @dataclasses.dataclass class UpperCamelCase__ : """simple docstring""" __UpperCAmelCase = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""}) __UpperCAmelCase = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""}) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """A csv or a json file containing the validation data."""}) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """The name of the task to train on."""} , ) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """The list of labels for the task."""}) @dataclasses.dataclass class UpperCamelCase__ : """simple docstring""" __UpperCAmelCase = dataclasses.field( metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""}) __UpperCAmelCase = dataclasses.field( default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""}) __UpperCAmelCase = dataclasses.field( default="""no""" , metadata={ """help""": 
"""The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]""" } , ) __UpperCAmelCase = dataclasses.field( default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) __UpperCAmelCase = dataclasses.field( default=0.0 , metadata={ """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions.""" } , ) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , ) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , ) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , ) __UpperCAmelCase = dataclasses.field( default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , ) __UpperCAmelCase = dataclasses.field( default=1_00 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) __UpperCAmelCase = dataclasses.field( default=a_ , metadata={"""help""": """Random seed for initialization."""} , ) def A ( __snake_case: List[str] , __snake_case: List[Any] , __snake_case: Optional[int] , __snake_case: Union[str, Any] , __snake_case: Any , __snake_case: Union[str, Any] ) -> List[str]: """simple docstring""" __magic_name__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: __magic_name__ = dataset.filter(lambda __snake_case : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 __magic_name__ = int(eval_result * len(__snake_case ) ) print(__snake_case ) __magic_name__ = dataset.sort('probability' , 
reverse=__snake_case ) __magic_name__ = dataset.select(range(__snake_case ) ) __magic_name__ = dataset.remove_columns(['label', 'probability'] ) __magic_name__ = dataset.rename_column('prediction' , 'label' ) __magic_name__ = dataset.map(lambda __snake_case : {"label": idalabel[example["label"]]} ) __magic_name__ = dataset.shuffle(seed=args.seed ) __magic_name__ = os.path.join(__snake_case , F"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(__snake_case , index=__snake_case ) else: dataset.to_json(__snake_case ) def A ( __snake_case: Any , __snake_case: Dict , __snake_case: List[str] , __snake_case: Any , **__snake_case: Union[str, Any] ) -> List[str]: """simple docstring""" __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() __magic_name__ = STModelArguments(model_name_or_path=__snake_case ) __magic_name__ = STDataArguments(train_file=__snake_case , infer_file=__snake_case ) __magic_name__ = STTrainingArguments(output_dir=__snake_case ) __magic_name__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__snake_case ).items(): setattr(__snake_case , __snake_case , __snake_case ) for key, value in kwargs.items(): if hasattr(__snake_case , __snake_case ): setattr(__snake_case , __snake_case , __snake_case ) # Sanity checks __magic_name__ = {} __magic_name__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None __magic_name__ = args.train_file __magic_name__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None __magic_name__ = args.eval_file for key in data_files: __magic_name__ = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: __magic_name__ = extension else: assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' 
) __magic_name__ = F"""{args.output_dir}/self-train_iter-{{}}""".format __magic_name__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=__snake_case ) os.makedirs(__snake_case , exist_ok=__snake_case ) accelerator.wait_for_everyone() __magic_name__ = None __magic_name__ = None __magic_name__ = 0 __magic_name__ = False # Show the progress bar __magic_name__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): __magic_name__ = data_dir_format(__snake_case ) assert os.path.exists(__snake_case ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 __magic_name__ = os.path.join(__snake_case , 'stage-1' ) __magic_name__ = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__snake_case , __snake_case ): arguments_dict.update({key: value} ) __magic_name__ = os.path.join(__snake_case , 'best-checkpoint' , __snake_case ) if os.path.exists(__snake_case ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' 
, __snake_case , __snake_case , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , __snake_case ) finetune(**__snake_case ) accelerator.wait_for_everyone() assert os.path.exists(__snake_case ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , __snake_case ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data __magic_name__ = os.path.join(__snake_case , 'best-checkpoint' ) __magic_name__ = os.path.join(__snake_case , 'stage-2' ) # Update arguments_dict __magic_name__ = model_path __magic_name__ = data_files['train'] __magic_name__ = current_output_dir __magic_name__ = os.path.join(__snake_case , 'best-checkpoint' , __snake_case ) if os.path.exists(__snake_case ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , __snake_case , __snake_case , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , __snake_case ) finetune(**__snake_case ) accelerator.wait_for_everyone() assert os.path.exists(__snake_case ) logger.info('Self-training job completed: iteration: %d, stage: 2.' , __snake_case ) __magic_name__ = iteration __magic_name__ = data_dir_format(iteration + 1 ) __magic_name__ = AutoConfig.from_pretrained(os.path.join(__snake_case , 'best-checkpoint' ) ) __magic_name__ = config.idalabel __magic_name__ = os.path.join(__snake_case , 'eval_results_best-checkpoint.json' ) __magic_name__ = os.path.join(__snake_case , 'test_results_best-checkpoint.json' ) assert os.path.exists(__snake_case ) with open(__snake_case , 'r' ) as f: __magic_name__ = float(json.load(__snake_case )[args.eval_metric] ) __magic_name__ = os.path.join(__snake_case , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(__snake_case ) # Loading the dataset from local csv or json files. 
__magic_name__ = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data'] __magic_name__ = load_dataset('csv' , data_files={'data': infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(__snake_case , exist_ok=__snake_case ) shutil.copy(__snake_case , os.path.join(__snake_case , F"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(__snake_case ): shutil.copy(__snake_case , os.path.join(__snake_case , F"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) accelerator.wait_for_everyone() __magic_name__ = os.path.join(__snake_case , F"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: __magic_name__ = eval_result if best_iteration is None: __magic_name__ = new_iteration __magic_name__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: __magic_name__ = new_iteration __magic_name__ = new_eval_result __magic_name__ = 0 else: if new_eval_result == best_eval_result: __magic_name__ = new_iteration __magic_name__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: __magic_name__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , __snake_case ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__snake_case , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(__snake_case , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , __snake_case ) 
accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__snake_case , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__snake_case , 'eval_results_best-iteration.json' ) , )
545
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version snake_case : Tuple = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""") snake_case : List[str] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def A ( __snake_case: str ) -> Tuple: """simple docstring""" with open(__snake_case , 'rb' ) as f: __magic_name__ = Image.open(__snake_case ) return im.convert('RGB' ) @dataclass class UpperCamelCase__ : """simple docstring""" __UpperCAmelCase = field( default=a_ , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) __UpperCAmelCase = field( default=a_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) __UpperCAmelCase = field(default=a_ , metadata={"""help""": """A folder containing the training data."""}) __UpperCAmelCase = field(default=a_ , metadata={"""help""": """A folder containing the validation data."""}) __UpperCAmelCase = field( 
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""}) __UpperCAmelCase = field( default=a_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __UpperCAmelCase = field( default=a_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def a__ ( self : Tuple ): '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( 'You must specify either a dataset name from the hub or a train and/or validation directory.' ) @dataclass class UpperCamelCase__ : """simple docstring""" __UpperCAmelCase = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) __UpperCAmelCase = field( default=a_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a_)} , ) __UpperCAmelCase = field( default=a_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) __UpperCAmelCase = field( default=a_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""}) __UpperCAmelCase = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __UpperCAmelCase = field(default=a_ , metadata={"""help""": """Name or path of preprocessor config."""}) __UpperCAmelCase = field( default=a_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) __UpperCAmelCase = field( default=a_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are 
different."""} , ) def A ( __snake_case: Dict ) -> Any: """simple docstring""" __magic_name__ = torch.stack([example['pixel_values'] for example in examples] ) __magic_name__ = torch.tensor([example['labels'] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def A ( ) -> List[str]: """simple docstring""" __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_image_classification' , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __magic_name__ = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __magic_name__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __magic_name__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: __magic_name__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , ) else: __magic_name__ = {} if data_args.train_dir is not None: __magic_name__ = os.path.join(data_args.train_dir , '**' ) if data_args.validation_dir is not None: __magic_name__ = os.path.join(data_args.validation_dir , '**' ) __magic_name__ = load_dataset( 'imagefolder' , data_files=__snake_case , cache_dir=model_args.cache_dir , task='image-classification' , ) # If we don't have a validation split, split off a percentage of train as validation. __magic_name__ = None if 'validation' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: __magic_name__ = dataset['train'].train_test_split(data_args.train_val_split ) __magic_name__ = split['train'] __magic_name__ = split['test'] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. __magic_name__ = dataset['train'].features['labels'].names __magic_name__ , __magic_name__ = {}, {} for i, label in enumerate(__snake_case ): __magic_name__ = str(__snake_case ) __magic_name__ = label # Load the accuracy metric from the datasets package __magic_name__ = evaluate.load('accuracy' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(__snake_case: str ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) __magic_name__ = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__snake_case ) , labelaid=__snake_case , idalabel=__snake_case , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __magic_name__ = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) __magic_name__ = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: __magic_name__ = image_processor.size['shortest_edge'] else: __magic_name__ = (image_processor.size['height'], image_processor.size['width']) __magic_name__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) __magic_name__ = Compose( [ RandomResizedCrop(__snake_case ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) __magic_name__ = Compose( [ Resize(__snake_case ), CenterCrop(__snake_case ), ToTensor(), normalize, ] ) def train_transforms(__snake_case: str ): __magic_name__ = [ _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image'] ] return example_batch def val_transforms(__snake_case: List[Any] ): __magic_name__ = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __magic_name__ = ( dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __magic_name__ = ( dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__snake_case ) # Initalize our trainer __magic_name__ = Trainer( model=__snake_case , args=__snake_case , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: __magic_name__ = None if training_args.resume_from_checkpoint is not None: __magic_name__ = 
training_args.resume_from_checkpoint elif last_checkpoint is not None: __magic_name__ = last_checkpoint __magic_name__ = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __magic_name__ = trainer.evaluate() trainer.log_metrics('eval' , __snake_case ) trainer.save_metrics('eval' , __snake_case ) # Write model card and (optionally) push to hub __magic_name__ = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'image-classification', 'dataset': data_args.dataset_name, 'tags': ['image-classification', 'vision'], } if training_args.push_to_hub: trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) if __name__ == "__main__": main()
545
1
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
460
'''simple docstring''' from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class _UpperCAmelCase ( lowerCAmelCase__ ): """simple docstring""" a_ = 42 a_ = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
460
1
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A_ ( __lowerCamelCase ): '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None lowercase = None lowercase = None if 
self.use_labels: lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = DistilBertModel(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , snake_case ) lowercase = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = DistilBertForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = DistilBertForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model( snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = DistilBertForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = DistilBertForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_choices lowercase = DistilBertForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = model( snake_case , attention_mask=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) = config_and_inputs lowercase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Optional[int] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, 
DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _UpperCamelCase : str = ( { """feature-extraction""": DistilBertModel, """fill-mask""": DistilBertForMaskedLM, """question-answering""": DistilBertForQuestionAnswering, """text-classification""": DistilBertForSequenceClassification, """token-classification""": DistilBertForTokenClassification, """zero-shot""": DistilBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : str = True _UpperCamelCase : List[Any] = True _UpperCamelCase : Optional[int] = True _UpperCamelCase : Tuple = True def SCREAMING_SNAKE_CASE__ ( self ): lowercase = DistilBertModelTester(self ) lowercase = ConfigTester(self , config_class=snake_case , dim=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in 
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = DistilBertModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return lowercase = True lowercase = model_class(config=snake_case ) lowercase = self._prepare_for_class(snake_case , snake_case ) lowercase = torch.jit.trace( snake_case , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case , os.path.join(snake_case , 'traced_model.pt' ) ) lowercase = torch.jit.load(os.path.join(snake_case , 'traced_model.pt' ) , map_location=snake_case ) loaded(inputs_dict['input_ids'].to(snake_case ) , inputs_dict['attention_mask'].to(snake_case ) ) @require_torch class A_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self ): lowercase = DistilBertModel.from_pretrained('distilbert-base-uncased' ) lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase = model(snake_case , attention_mask=snake_case )[0] lowercase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , snake_case ) lowercase = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) )
84
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Parse initialization arguments (tokenizer/config/model names, push_to_hub flag).
# NOTE: the previous version bound every result to a throwaway name
# (`lowercase_`) while the following statements read `parser`, `args`,
# `tokenizer`, `config_kwargs`, `config` and `model` — a guaranteed NameError.
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
291
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json.
# NOTE: previously both this dict and the logger were bound to the same name
# (`__lowerCAmelCase`), so the logger was silently clobbered.
__lowerCAmelCase = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class lowerCamelCase(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) backbone.

    Defaults reproduce the `van-base` checkpoint. The previous version was
    syntactically invalid: every `__init__` parameter shared the name
    `__lowerCamelCase` (duplicate-argument SyntaxError) and the base class
    `__snake_case` was undefined; parameter names are restored from the
    attribute assignments in the body.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],  # HF-config convention: list defaults are treated as read-only
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ) -> None:
        """Store the per-stage architecture hyper-parameters.

        Args:
            image_size: Input resolution.
            num_channels: Input image channels.
            patch_sizes / strides: Per-stage patch-embedding kernel and stride.
            hidden_sizes / depths / mlp_ratios: Per-stage width, block count
                and MLP expansion ratio.
            hidden_act: Activation function name.
            initializer_range / layer_norm_eps / layer_scale_init_value:
                Initialization and normalization constants.
            drop_path_rate / dropout_rate: Regularization rates.
        """
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
164
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class lowerCamelCase(PipelineTool):
    """Zero-shot text classification tool backed by an NLI model.

    The previous version was unusable: every class attribute shared one
    mangled name and all three methods were defined under the same name (so
    only the last survived); attribute and method names expected by
    `PipelineTool` (`setup`/`encode`/`decode`) are restored, and the garbled
    `config.idalabel` is corrected to `config.id2label`.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Resolve which logit index corresponds to 'entailment' in the model config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize `text` paired with one 'This example is <label>' hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the label whose entailment logit is highest."""
        logits = outputs.logits
        # NOTE(review): logit column 2 is hard-coded as the entailment index
        # here even though setup() computes self.entailment_id — confirm.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
164
1
"""Merge a .safetensors LoRA checkpoint into a diffusers StableDiffusionPipeline."""
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Load the base pipeline and add `alpha * up @ down` LoRA deltas in place.

    The previous version could not even be parsed: the function signature
    repeated the parameter name `A` five times (duplicate-argument
    SyntaxError) and the body read names (`LORA_PREFIX_*`, `alpha`,
    `visited`, `temp_name`, `pipe`, `convert`) that were never defined.

    Args:
        base_model_path: Path to the base model in diffusers format.
        checkpoint_path: Path to the .safetensors LoRA checkpoint.
        lora_prefix_unet: Key prefix for UNet weights in the checkpoint.
        lora_prefix_text_encoder: Key prefix for text-encoder weights.
        alpha: Merging ratio in W = W0 + alpha * deltaW.

    Returns:
        The pipeline with LoRA weights merged into its parameters.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # keys look like "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight";
        # alpha entries are skipped, and each up/down pair is handled once.
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: module names may themselves contain "_", so on
        # a failed lookup we glue the next fragment back on and retry.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # order the pair as (up, down)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: conv LoRA weights are stored 4-D and squeezed for the matmul
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    pipe = convert(
        args.base_model_path, args.checkpoint_path, args.lora_prefix_unet, args.lora_prefix_text_encoder, args.alpha
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
90
import math from datetime import datetime, timedelta def _A( UpperCamelCase__ : int ) -> datetime: '''simple docstring''' __lowercase = year % 19 __lowercase = year % 4 __lowercase = year % 7 __lowercase = math.floor(year / 100 ) __lowercase = math.floor((13 + 8 * leap_day_inhibits) / 25 ) __lowercase = leap_day_inhibits / 4 __lowercase = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 __lowercase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 __lowercase = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon __lowercase = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(UpperCamelCase__ , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(UpperCamelCase__ , 4 , 18 ) else: return datetime(UpperCamelCase__ , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1994, 2000, 2010, 2021, 2023): UpperCAmelCase__ = "will be" if year > datetime.now().year else "was" print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
332
0
"""simple docstring""" import os import sys import transformers _snake_case = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
702
"""simple docstring""" def snake_case ( _a: list[list[float]] )-> list[list[float]]: '''simple docstring''' lowerCamelCase__ = [] for data in source_data: for i, el in enumerate(_a ): if len(_a ) < i + 1: data_lists.append([] ) data_lists[i].append(float(_a ) ) return data_lists def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]: '''simple docstring''' lowerCamelCase__ = [] for dlist, weight in zip(_a , _a ): lowerCamelCase__ = min(_a ) lowerCamelCase__ = max(_a ) lowerCamelCase__ = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: lowerCamelCase__ = F'Invalid weight of {weight:f} provided' raise ValueError(_a ) score_lists.append(_a ) return score_lists def snake_case ( _a: list[list[float]] )-> list[float]: '''simple docstring''' lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(_a ): lowerCamelCase__ = final_scores[j] + ele return final_scores def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]: '''simple docstring''' lowerCamelCase__ = get_data(_a ) lowerCamelCase__ = calculate_each_score(_a , _a ) lowerCamelCase__ = generate_final_scores(_a ) # append scores to source data for i, ele in enumerate(_a ): source_data[i].append(_a ) return source_data
659
0
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : int = ['a', 'b', 'c'] # Defaults to last layer if both are None SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = get_aligned_output_features_output_indices(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , ['c'] ) self.assertEqual(_SCREAMING_SNAKE_CASE , [2] ) # Out indices set to match out features SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = get_aligned_output_features_output_indices(['a', 'c'] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , ['a', 'c'] ) self.assertEqual(_SCREAMING_SNAKE_CASE , [0, 2] ) # Out features set to match out indices SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = get_aligned_output_features_output_indices(_SCREAMING_SNAKE_CASE , [0, 2] , _SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , ['a', 'c'] ) self.assertEqual(_SCREAMING_SNAKE_CASE , [0, 2] ) # Out features selected from negative indices SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = get_aligned_output_features_output_indices(_SCREAMING_SNAKE_CASE , [-3, -1] , _SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , ['a', 'c'] ) self.assertEqual(_SCREAMING_SNAKE_CASE , [-3, -1] ) def _lowerCAmelCase ( self : int ) -> int: """simple docstring""" with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , _SCREAMING_SNAKE_CASE ) # Out features must be a list with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] ) # Out features must be a subset of stage names with 
self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] ) # Out indices must be a list or tuple with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(_SCREAMING_SNAKE_CASE , 0 , ['a', 'b'] ) # Out indices must be a subset of stage names with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(_SCREAMING_SNAKE_CASE , (0, 1) , ['a'] ) # Out features and out indices must be the same length with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] ) # Out features should match out indices with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] ) # Out features and out indices should be in order with self.assertRaises(_SCREAMING_SNAKE_CASE ): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] ) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] ) def _lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = BackboneMixin() SCREAMING_SNAKE_CASE : Optional[int] = ['a', 'b', 'c'] SCREAMING_SNAKE_CASE : Tuple = ['a', 'c'] SCREAMING_SNAKE_CASE : Any = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly SCREAMING_SNAKE_CASE : Optional[int] = ['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b'] ) self.assertEqual(backbone.out_indices , [0, 1] ) SCREAMING_SNAKE_CASE : Union[str, Any] = [-3, -1] self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [-3, -1] )
265
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule A_ : str = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys A_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
265
1
"""Configuration classes for the ALIGN model (text encoder, vision encoder,
and the composite config).

The previous version defined all three classes under the single name
`UpperCamelCase_` (only the last survived), inherited from an undefined `A`,
and referenced nonexistent `AlignTextConfig`/`AlignVisionConfig` plus a
logger that had been clobbered — the intended names are restored.
"""
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json.
_snake_case = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    """BERT-style text-encoder configuration for ALIGN."""

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a full ALIGN config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    """EfficientNet-style vision-encoder configuration for ALIGN."""

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],  # HF-config convention: list defaults treated as read-only
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # each block repeat expands into 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a full ALIGN config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    """Composite ALIGN configuration holding the text and vision sub-configs."""

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """Build an AlignConfig from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias for the previous (obfuscated) class name, which
# ended up bound to the composite config.
UpperCamelCase_ = AlignConfig
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _snake_case = { "configuration_xlm_roberta": [ "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig", "XLMRobertaOnnxConfig", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["XLMRobertaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["XLMRobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", "XLMRobertaForQuestionAnswering", "XLMRobertaForSequenceClassification", "XLMRobertaForTokenClassification", "XLMRobertaModel", "XLMRobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", "TFXLMRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", "FlaxXLMRobertaForSequenceClassification", "FlaxXLMRobertaForTokenClassification", "FlaxXLMRobertaModel", 
"FlaxXLMRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, 
module_spec=__spec__)
413
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging UpperCAmelCase__ : List[str] = logging.get_logger(__name__) def lowercase_ ( _snake_case ): if isinstance(_snake_case ,np.ndarray ): return list(tensor.shape ) SCREAMING_SNAKE_CASE__ : int = tf.shape(_snake_case ) if tensor.shape == tf.TensorShape(_snake_case ): return dynamic SCREAMING_SNAKE_CASE__ : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_snake_case )] def lowercase_ ( _snake_case ,_snake_case = None ,_snake_case = None ): return tf.nn.softmax(logits=logits + 1E-9 ,axis=_snake_case ,name=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=1E-5 ,_snake_case=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_snake_case ,_snake_case ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.nn.moments(_snake_case ,axes=[axis] ,keepdims=_snake_case ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis SCREAMING_SNAKE_CASE__ : Optional[int] = [1] * inputs.shape.rank SCREAMING_SNAKE_CASE__ : Optional[int] = shape_list(_snake_case )[axis] SCREAMING_SNAKE_CASE__ : int = tf.reshape(_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : int = tf.reshape(_snake_case ,_snake_case ) # Compute layer normalization using the batch_normalization # function. 
SCREAMING_SNAKE_CASE__ : Dict = tf.nn.batch_normalization( _snake_case ,_snake_case ,_snake_case ,offset=_snake_case ,scale=_snake_case ,variance_epsilon=_snake_case ,) return outputs def lowercase_ ( _snake_case ,_snake_case=0 ,_snake_case=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.shape(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 ) return tf.reshape(_snake_case ,_snake_case ) def lowercase_ ( _snake_case ): if not isinstance(_snake_case ,tf.Tensor ): SCREAMING_SNAKE_CASE__ : str = tf.convert_to_tensor(_snake_case ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: SCREAMING_SNAKE_CASE__ : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: SCREAMING_SNAKE_CASE__ : int = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) SCREAMING_SNAKE_CASE__ : List[Any] = ( tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase_ ( _snake_case ,_snake_case ,_snake_case = "input_ids" ): tf.debugging.assert_less( _snake_case ,tf.cast(_snake_case ,dtype=tensor.dtype ) ,message=( f'''The maximum value of {tensor_name} ({tf.math.reduce_max(_snake_case )}) must be smaller than the embedding ''' f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) ,) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[int] = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. SCREAMING_SNAKE_CASE__ : int = [x for x in data if len(_snake_case ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' f'''bytes: {bad_attributes}''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : str = np.array_split(_snake_case ,_snake_case ) # This will never loop forever thanks to the test above. 
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 SCREAMING_SNAKE_CASE__ : List[str] = np.array_split(_snake_case ,_snake_case ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_snake_case ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = chunk_data else: SCREAMING_SNAKE_CASE__ : Any = data def lowercase_ ( _snake_case ,_snake_case ): if name in group.attrs: SCREAMING_SNAKE_CASE__ : Dict = [n.decode("""utf8""" ) if hasattr(_snake_case ,"""decode""" ) else n for n in group.attrs[name]] else: SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : str = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(_snake_case ,"""decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase_ ( _snake_case ): def _expand_single_ad_tensor(_snake_case ): if isinstance(_snake_case ,tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_snake_case ,axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor ,_snake_case )
223
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase__ : Union[str, Any] = abspath(join(dirname(dirname(__file__)), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def lowercase_ ( _snake_case ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def lowercase_ ( _snake_case ): from diffusers.utils.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE__ : Any = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(_snake_case ,id=_snake_case )
223
1
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> str: if number > 0: raise ValueError('input must be a negative integer' ) __UpperCAmelCase =len(bin(_lowerCAmelCase )[3:] ) __UpperCAmelCase =bin(abs(_lowerCAmelCase ) - (1 << binary_number_length) )[3:] __UpperCAmelCase =( ( '1' + '0' * (binary_number_length - len(_lowerCAmelCase )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
701
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) UpperCamelCase_ = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def SCREAMING_SNAKE_CASE ( snake_case__ ) -> str: __UpperCAmelCase ={} state_dict.pop('''pixel_mean''' , snake_case__ ) state_dict.pop('''pixel_std''' , snake_case__ ) __UpperCAmelCase =r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __UpperCAmelCase =key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): __UpperCAmelCase =int(re.match(snake_case__ , snake_case__ ).group(2 ) ) if layer_nb == 0: __UpperCAmelCase =key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: __UpperCAmelCase =key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: __UpperCAmelCase 
=key.replace('''layers.2''' , '''proj_out''' ) __UpperCAmelCase =value __UpperCAmelCase =model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , snake_case__="ybelkada/segment-anything" ) -> Optional[int]: __UpperCAmelCase =hf_hub_download(snake_case__ , f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: __UpperCAmelCase =SamConfig() elif "sam_vit_l" in model_name: __UpperCAmelCase =SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) __UpperCAmelCase =SamConfig( vision_config=snake_case__ , ) elif "sam_vit_h" in model_name: __UpperCAmelCase =SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) __UpperCAmelCase =SamConfig( vision_config=snake_case__ , ) __UpperCAmelCase =torch.load(snake_case__ , map_location='''cpu''' ) __UpperCAmelCase =replace_keys(snake_case__ ) __UpperCAmelCase =SamImageProcessor() __UpperCAmelCase =SamProcessor(image_processor=snake_case__ ) __UpperCAmelCase =SamModel(snake_case__ ) hf_model.load_state_dict(snake_case__ ) __UpperCAmelCase =hf_model.to('''cuda''' ) __UpperCAmelCase ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' __UpperCAmelCase =Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('''RGB''' ) __UpperCAmelCase =[[[400, 650]]] __UpperCAmelCase =[[1]] __UpperCAmelCase =processor(images=np.array(snake_case__ ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCAmelCase =hf_model(**snake_case__ ) __UpperCAmelCase =output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 __UpperCAmelCase =processor( images=np.array(snake_case__ ) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors='''pt''' 
).to('''cuda''' ) with torch.no_grad(): __UpperCAmelCase =hf_model(**snake_case__ ) __UpperCAmelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 __UpperCAmelCase =((75, 275, 1725, 850),) __UpperCAmelCase =processor(images=np.array(snake_case__ ) , input_boxes=snake_case__ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCAmelCase =hf_model(**snake_case__ ) __UpperCAmelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. __UpperCAmelCase =[[[400, 650], [800, 650]]] __UpperCAmelCase =[[1, 1]] __UpperCAmelCase =processor( images=np.array(snake_case__ ) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCAmelCase =hf_model(**snake_case__ ) __UpperCAmelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() UpperCamelCase_ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) UpperCamelCase_ = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
142
0
import sys _lowercase = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def _A (UpperCamelCase : Dict ) ->int: '''simple docstring''' lowerCamelCase__ : Optional[int] = 1 for digit in s: product *= int(lowerCAmelCase_ ) return product def _A (UpperCamelCase : Optional[Any] = N ) ->int: '''simple docstring''' lowerCamelCase__ : Dict = -sys.maxsize - 1 lowerCamelCase__ : Dict = n[:13] lowerCamelCase__ : List[str] = 13 while cur_index < len(lowerCAmelCase_ ) - 13: if int(n[cur_index] ) >= int(substr[0] ): lowerCamelCase__ : List[Any] = substr[1:] + n[cur_index] cur_index += 1 else: lowerCamelCase__ : Optional[Any] = max(lowerCAmelCase_ , str_eval(lowerCAmelCase_ ) ) lowerCamelCase__ : Optional[Any] = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
157
"""simple docstring""" from ...configuration_utils import PretrainedConfig class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): A__ : str = '''bert-generation''' def __init__( self : Tuple , __lowerCamelCase : Optional[int]=5_0_3_5_8 , __lowerCamelCase : List[str]=1_0_2_4 , __lowerCamelCase : Optional[Any]=2_4 , __lowerCamelCase : Any=1_6 , __lowerCamelCase : Union[str, Any]=4_0_9_6 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=5_1_2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Tuple=1E-12 , __lowerCamelCase : Any=0 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]="absolute" , __lowerCamelCase : str=True , **__lowerCamelCase : List[Any] , ): """simple docstring""" super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = position_embedding_type _snake_case = use_cache
103
0
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path snake_case_ : Any = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def lowercase_ ( _lowercase : Any=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=lowerCAmelCase_ ) ) class snake_case__ ( lowerCAmelCase_ ): 
SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None def __lowerCAmelCase ( self : Union[str, Any] , lowercase : List[str] , lowercase : Any ): '''simple docstring''' with TemporaryDirectory() as tmp_dir: UpperCAmelCase : Union[str, Any] = dataset_module_factory(lowercase , cache_dir=lowercase ) UpperCAmelCase : int = import_main_class(dataset_module.module_path , dataset=lowercase ) UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=lowercase , config_name=lowercase , hash=dataset_module.hash , ) UpperCAmelCase : List[Any] = "/".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowercase ).replace(os.sep , "/" ), config.DATASET_INFO_FILENAME, ] ) UpperCAmelCase : List[str] = cached_path(lowercase , cache_dir=lowercase ) self.assertTrue(os.path.exists(lowercase ) ) @pytest.mark.integration def lowercase_ ( _lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase : Any = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple" UpperCAmelCase : Any = dataset_module_factory("wikipedia" , cache_dir=_lowercase ) UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path ) UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=_lowercase , config_name="20220301.frr" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam UpperCAmelCase : Dict = None builder_instance.download_and_prepare() UpperCAmelCase : Dict = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowercase_ ( _lowercase : Optional[int] ): '''simple docstring''' UpperCAmelCase : List[Any] = dataset_module_factory("wikipedia" , cache_dir=_lowercase ) UpperCAmelCase : List[str] = import_main_class(dataset_module.module_path , dataset=_lowercase ) UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=_lowercase , config_name="20220301.frr" , hash=dataset_module.hash , ) UpperCAmelCase : Dict = builder_instance.as_streaming_dataset() assert ds assert 
isinstance(_lowercase , _lowercase ) assert "train" in ds assert isinstance(ds["train"] , _lowercase ) assert next(iter(ds["train"] ) )
292
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case_ : Union[str, Any] = """▁""" snake_case_ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = BigBirdTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' super().setUp() UpperCAmelCase : str = self.tokenizer_class(lowercase , keep_accents=lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "<s>" UpperCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(lowercase ) , 10_04 ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : Tuple = self.get_rust_tokenizer() UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." 
UpperCAmelCase : Optional[int] = tokenizer.tokenize(lowercase ) UpperCAmelCase : Tuple = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) UpperCAmelCase : List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) UpperCAmelCase : Tuple = self.get_rust_tokenizer() UpperCAmelCase : Dict = tokenizer.encode(lowercase ) UpperCAmelCase : List[str] = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' UpperCAmelCase : Dict = BigBirdTokenizer(lowercase , keep_accents=lowercase ) UpperCAmelCase : int = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowercase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual( lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase : str = "Hello World!" UpperCAmelCase : Union[str, Any] = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) ) @slow def __lowerCAmelCase ( self : Dict ): '''simple docstring''' UpperCAmelCase : List[str] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off UpperCAmelCase : Tuple = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) ) @require_torch @slow def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence UpperCAmelCase : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase : Optional[Any] = " ".join(lowercase ) UpperCAmelCase : List[Any] = self.big_tokenizer.encode_plus(lowercase , return_tensors="pt" , return_token_type_ids=lowercase ) UpperCAmelCase : Tuple = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowercase ) UpperCAmelCase : Optional[Any] = BigBirdConfig(attention_type="original_full" ) UpperCAmelCase : List[Any] = BigBirdModel(lowercase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase ) model(**lowercase ) @slow def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) UpperCAmelCase : Any = tokenizer.decode(tokenizer("Paris is the [MASK]." 
).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase : Optional[int] = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
292
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp() # fmt: off _UpperCAmelCase : int = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _UpperCAmelCase : Dict = dict(zip(A , range(len(A ) ) ) ) _UpperCAmelCase : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] _UpperCAmelCase : Optional[int] = {'''unk_token''': '''<unk>'''} _UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A ) ) _UpperCAmelCase : Optional[int] = { '''do_resize''': True, '''size''': 2_0, '''do_center_crop''': True, '''crop_size''': 1_8, '''do_normalize''': True, '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073], '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711], } _UpperCAmelCase : str = os.path.join(self.tmpdirname , A ) with 
open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(A , A ) def __lowerCAmelCase ( self , **A ) -> Dict: return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **A ) def __lowerCAmelCase ( self , **A ) -> str: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **A ) def __lowerCAmelCase ( self , **A ) -> Tuple: return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A ) def __lowerCAmelCase ( self ) -> str: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase : List[str] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Union[str, Any] = self.get_tokenizer() _UpperCAmelCase : List[Any] = self.get_rust_tokenizer() _UpperCAmelCase : int = self.get_image_processor() _UpperCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=A , image_processor=A ) processor_slow.save_pretrained(self.tmpdirname ) _UpperCAmelCase : Any = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A ) _UpperCAmelCase : Dict = OwlViTProcessor(tokenizer=A , image_processor=A ) processor_fast.save_pretrained(self.tmpdirname ) _UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , A ) self.assertIsInstance(processor_fast.tokenizer , A ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , A ) self.assertIsInstance(processor_fast.image_processor , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=A ) _UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A ) def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Union[str, Any] = self.get_image_processor() _UpperCAmelCase : List[str] = self.get_tokenizer() _UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs() _UpperCAmelCase : Any = image_processor(A , return_tensors='''np''' ) _UpperCAmelCase : Optional[int] = processor(images=A , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Optional[Any] = self.get_image_processor() _UpperCAmelCase : Tuple = self.get_tokenizer() _UpperCAmelCase : str = OwlViTProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : List[str] = '''lower newer''' _UpperCAmelCase : List[str] = processor(text=A , return_tensors='''np''' ) _UpperCAmelCase : Tuple = tokenizer(A , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , 
encoded_processor[key][0].tolist() ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = self.get_image_processor() _UpperCAmelCase : int = self.get_tokenizer() _UpperCAmelCase : List[str] = OwlViTProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : Tuple = '''lower newer''' _UpperCAmelCase : Optional[int] = self.prepare_image_inputs() _UpperCAmelCase : Dict = processor(text=A , images=A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : int = '''google/owlvit-base-patch32''' _UpperCAmelCase : List[Any] = OwlViTProcessor.from_pretrained(A ) _UpperCAmelCase : str = ['''cat''', '''nasa badge'''] _UpperCAmelCase : Any = processor(text=A ) _UpperCAmelCase : Optional[int] = 1_6 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = '''google/owlvit-base-patch32''' _UpperCAmelCase : List[str] = OwlViTProcessor.from_pretrained(A ) _UpperCAmelCase : Optional[Any] = [['''cat''', '''nasa badge'''], ['''person''']] _UpperCAmelCase : Tuple = processor(text=A ) _UpperCAmelCase : Any = 1_6 _UpperCAmelCase : Optional[Any] = len(A ) _UpperCAmelCase : int = max([len(A ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Optional[int] = '''google/owlvit-base-patch32''' _UpperCAmelCase : Tuple = 
OwlViTProcessor.from_pretrained(A ) _UpperCAmelCase : Any = ['''cat''', '''nasa badge'''] _UpperCAmelCase : Optional[Any] = processor(text=A ) _UpperCAmelCase : List[Any] = 1_6 _UpperCAmelCase : str = inputs['''input_ids'''] _UpperCAmelCase : str = [ [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Dict = self.get_image_processor() _UpperCAmelCase : List[str] = self.get_tokenizer() _UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : Optional[int] = self.prepare_image_inputs() _UpperCAmelCase : Tuple = self.prepare_image_inputs() _UpperCAmelCase : Union[str, Any] = processor(images=A , query_images=A ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Tuple = self.get_image_processor() _UpperCAmelCase : str = self.get_tokenizer() _UpperCAmelCase : Any = OwlViTProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase : Optional[int] = processor.batch_decode(A ) _UpperCAmelCase : List[str] = tokenizer.batch_decode(A ) self.assertListEqual(A , A )
506
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 7 , UpperCamelCase__ : int = 100_0000 ): _UpperCAmelCase : List[Any] = 0 _UpperCAmelCase : Optional[int] = 1 for current_denominator in range(1 , limit + 1 ): _UpperCAmelCase : Union[str, Any] = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: _UpperCAmelCase : List[Any] = current_numerator _UpperCAmelCase : Any = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
506
1
"""Lazy import structure for the Wav2Vec2 model family (PyTorch / TF / Flax)."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Objects importable regardless of which deep-learning backend is installed.
# NOTE(review): the string keys must match the sibling module file names — the
# TYPE_CHECKING branch below imports them as `*_wavaveca`; confirm against the
# actual package layout.
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

# PyTorch objects.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

# TensorFlow objects.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

# Flax objects.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
    from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
    from .processing_wavaveca import WavaVecaProcessor
    from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavaveca import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavaVecaForAudioFrameClassification,
            WavaVecaForCTC,
            WavaVecaForMaskedLM,
            WavaVecaForPreTraining,
            WavaVecaForSequenceClassification,
            WavaVecaForXVector,
            WavaVecaModel,
            WavaVecaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wavaveca import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWavaVecaForCTC,
            TFWavaVecaForSequenceClassification,
            TFWavaVecaModel,
            TFWavaVecaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Fixed: the Flax classes live in the Flax modeling module, not the TF one.
        from .modeling_flax_wavaveca import (
            FlaxWavaVecaForCTC,
            FlaxWavaVecaForPreTraining,
            FlaxWavaVecaModel,
            FlaxWavaVecaPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
667
"""Compute a(n) for the sequence a(1) = 1, a(n+1) = a(n) + digitsum(a(n))
(Project Euler 551), using memoized "jumps" over the low digits."""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
# memo: digitsum(high part) -> {low-digit value c: sorted list of (diff, dn, k) jumps}
memo = {}


def next_term(a_i, k, i, n):
    """
    Advance the little-endian digit list ``a_i`` from term ``i`` toward term
    ``n`` using cached jumps over the low ``k`` digits.

    Returns ``(diff, dn)``: the total value added and the number of terms
    jumped. Mutates ``a_i`` in place.
    """
    # a_i represents b * 10^k + c: ds_b is digitsum(b), c the low-digit value.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Advance ``a_i`` one term at a time until a carry reaches digit ``k`` or
    term ``n`` is hit. Returns ``(diff, number of terms computed)``.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        # carry escaped the low k digits -> ds_b would change; stop here
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Propagate the carry ``addend`` into ``digits`` starting at index ``k``."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the digit-sum sequence, e.g. solution(10) == 62."""
    digits = [1]
    i = 1
    dn = 0  # terms jumped so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # digits is little-endian; reassemble the integer
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
667
1
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) A_ = logging.getLogger() def _UpperCamelCase ( A ): UpperCamelCase_ ={} UpperCamelCase_ =os.path.join(A , "all_results.json" ) if os.path.exists(A ): with open(A , "r" ) as f: UpperCamelCase_ =json.load(A ) else: raise ValueError(f"""can\'t find {path}""" ) return results A_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class __lowerCAmelCase ( __a ): '''simple docstring''' def UpperCamelCase__ ( self: Dict ): import xla_spawn UpperCamelCase_ =self.get_auto_remove_tmp_dir() UpperCamelCase_ =f""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(a__ , "argv" , a__ ): UpperCamelCase_ =time() xla_spawn.main() UpperCamelCase_ =time() UpperCamelCase_ =get_results(a__ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def UpperCamelCase__ ( self: List[Any] ): import xla_spawn UpperCamelCase_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(a__ , "argv" , a__ ): xla_spawn.main()
391
"""Unit tests for the `check_dummies` repo-maintenance utility (diffusers)."""

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
432
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _lowerCamelCase ( unittest.TestCase ): _lowerCamelCase :Any = ViTImageProcessor if is_vision_available() else None @property def _lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCAmelCase__ : List[str] = (3, 32, 1_28) lowerCAmelCase__ : str = tempfile.mkdtemp() # fmt: off lowerCAmelCase__ : Tuple = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on lowerCAmelCase__ : Union[str, Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) lowerCAmelCase__ : List[str] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 1_28}, } lowerCAmelCase__ : Optional[int] = 
os.path.join(self.tmpdirname , UpperCamelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(UpperCamelCase , UpperCamelCase ) def _lowerCAmelCase ( self : int , **UpperCamelCase : Optional[int] ) -> Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def _lowerCAmelCase ( self : str , **UpperCamelCase : Optional[Any] ) -> Any: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ) def _lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Tuple = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta ) lowerCAmelCase__ : str = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) return image_input def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : List[str] = self.get_tokenizer() lowerCAmelCase__ : List[str] = self.get_image_processor() lowerCAmelCase__ : int = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase ) def _lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = self.get_tokenizer() lowerCAmelCase__ : int = self.get_image_processor() lowerCAmelCase__ : Dict = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) 
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase__ : int = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 ) lowerCAmelCase__ : Optional[Any] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase ) def _lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" lowerCAmelCase__ : Optional[int] = self.get_image_processor() lowerCAmelCase__ : Any = self.get_tokenizer() lowerCAmelCase__ : int = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowerCAmelCase__ : int = self.prepare_image_inputs() lowerCAmelCase__ : Optional[int] = image_processor(UpperCamelCase , return_tensors="""np""" ) lowerCAmelCase__ : List[Any] = processor(images=UpperCamelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : str = self.get_image_processor() lowerCAmelCase__ : List[Any] = self.get_tokenizer() lowerCAmelCase__ : Dict = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowerCAmelCase__ : Dict = """test""" lowerCAmelCase__ : Any = processor(text=UpperCamelCase ) lowerCAmelCase__ : int = tokenizer(UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Tuple 
= self.get_image_processor() lowerCAmelCase__ : Optional[Any] = self.get_tokenizer() lowerCAmelCase__ : int = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowerCAmelCase__ : Tuple = """test""" lowerCAmelCase__ : int = self.prepare_image_inputs() lowerCAmelCase__ : Union[str, Any] = processor(text=UpperCamelCase , images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase ): processor() def _lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" lowerCAmelCase__ : Dict = self.get_image_processor() lowerCAmelCase__ : Optional[Any] = self.get_tokenizer() lowerCAmelCase__ : Tuple = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowerCAmelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ : Dict = processor.char_decode(UpperCamelCase ) lowerCAmelCase__ : str = tokenizer.batch_decode(UpperCamelCase ) lowerCAmelCase__ : str = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(UpperCamelCase , UpperCamelCase ) def _lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = self.get_image_processor() lowerCAmelCase__ : Tuple = self.get_tokenizer() lowerCAmelCase__ : List[Any] = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowerCAmelCase__ : int = None lowerCAmelCase__ : Optional[Any] = self.prepare_image_inputs() lowerCAmelCase__ : str = processor(text=UpperCamelCase , images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def _lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Any = self.get_image_processor() lowerCAmelCase__ : Optional[int] = self.get_tokenizer() lowerCAmelCase__ : int = MgpstrProcessor(tokenizer=UpperCamelCase , 
image_processor=UpperCamelCase ) lowerCAmelCase__ : int = torch.randn(1 , 27 , 38 ) lowerCAmelCase__ : Optional[Any] = torch.randn(1 , 27 , 5_02_57 ) lowerCAmelCase__ : List[Any] = torch.randn(1 , 27 , 3_05_22 ) lowerCAmelCase__ : Union[str, Any] = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
507
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _A = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _A = direct_transformers_import(PATH_TO_TRANSFORMERS) _A = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` _A = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") _A = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def lowercase_ ( __UpperCAmelCase ) -> str: lowerCAmelCase__ : Union[str, Any] = None # source code of `config_class` lowerCAmelCase__ : List[Any] = inspect.getsource(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = _re_checkpoint.findall(__UpperCAmelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("""/""" ): lowerCAmelCase__ : Optional[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowerCAmelCase__ : Dict = f"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: lowerCAmelCase__ : Optional[Any] = ckpt_name break return checkpoint def lowercase_ ( ) -> Dict: lowerCAmelCase__ : Union[str, Any] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowerCAmelCase__ : Dict = get_checkpoint_from_config_class(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: lowerCAmelCase__ : int = """\n""".join(sorted(__UpperCAmelCase ) ) raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
507
1
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers h(n) = n * (2n - 1).

    :param length: how many hexagonal numbers to generate (must be a positive int).
    :return: list ``[h(0), h(1), ..., h(length - 1)]``.
    :raises ValueError: if ``length`` is not a positive integer.
    """
    # Check the type FIRST: comparing a non-int with `<=` could raise TypeError
    # before the intended ValueError is reached.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
268
def lowerCamelCase__(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    Both numbers are rendered in binary, left-padded to a common width, and
    AND-ed character by character.

    :param a: first non-negative integer.
    :param b: second non-negative integer.
    :return: binary string of the AND result, prefixed with ``"0b"``.
    :raises ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
268
1
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    # Integration test: compares TFCamembertModel hidden states against reference values.
    # NOTE(review): identifiers look machine-mangled — every local is bound to `A__` while
    # later reads use `a__`, `model`, `output`, `expected_slice`, and the dtypes `tf.intaa`
    # / `tf.floataa` (presumably tf.int32 / tf.float32) do not exist. As written this
    # method raises NameError/AttributeError at runtime — TODO confirm intended names.

    @slow
    def snake_case_ ( self):
        # Load the pretrained TF CamemBERT checkpoint.
        A__ = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')
        # Token ids for the sentence "J'aime le camembert !"
        A__ = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , )
        A__ = model(a__)['''last_hidden_state''']
        # Expected output shape: (batch=1, seq_len=10, hidden=768).
        A__ = tf.TensorShape((1, 1_0, 7_6_8))
        self.assertEqual(output.shape , a__)
        # compare the actual values for a slice.
        A__ = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
        # Reference values were produced with the fairseq checkpoint:
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
526
# Test module for the ViT-Hybrid model (model tester, common-test harness, integration tests).
# NOTE(review): identifiers look machine-mangled — parameters are all named `a__`
# (duplicate parameter names are a SyntaxError), locals are bound to `A__` while reads
# use the original names (`parent`, `model`, `result`, ...), the mixin bases are `A__`,
# and all test methods share the name `snake_case_` (later defs shadow earlier ones).
# TODO confirm/restore the original identifiers before relying on this module.
import inspect
import unittest

from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class _UpperCAmelCase :
    # Model tester: builds tiny ViTHybrid configs and inputs for the common model tests.

    def __init__( self , a__ , a__=1_3 , a__=6_4 , a__=2 , a__=3 , a__=True , a__=True , a__=3_2 , a__=5 , a__=4 , a__=3_7 , a__="gelu" , a__=0.1 , a__=0.1 , a__=1_0 , a__=0.0_2 , a__=[1, 1_6, 4, 4] , a__=None , ):
        A__ = parent
        A__ = batch_size
        A__ = image_size
        A__ = patch_size
        A__ = num_channels
        A__ = is_training
        A__ = use_labels
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = scope
        A__ = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        A__ = (self.image_size // 3_2) ** 2
        A__ = num_patches + 1

    def snake_case_ ( self):
        # prepare_config_and_inputs: random pixel values plus optional labels.
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        A__ = self.get_config()
        return config, pixel_values, labels

    def snake_case_ ( self):
        # get_config: tiny BiT backbone config + ViTHybrid config.
        A__ = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 1_6, 3_2],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=a__ , )

    def snake_case_ ( self , a__ , a__ , a__):
        # create_and_check_model: forward pass, check last_hidden_state shape.
        A__ = ViTHybridModel(config=a__)
        model.to(a__)
        model.eval()
        A__ = model(a__)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def snake_case_ ( self , a__ , a__ , a__):
        # create_and_check_for_image_classification: check logits shape.
        A__ = self.type_sequence_label_size
        A__ = ViTHybridForImageClassification(a__)
        model.to(a__)
        model.eval()
        A__ = model(a__ , labels=a__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

    def snake_case_ ( self):
        # prepare_config_and_inputs_for_common: pack inputs into the common dict form.
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ = config_and_inputs
        A__ = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class _UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
    # Common model tests for ViTHybrid (mixin bases are mangled; presumably
    # ModelTesterMixin and PipelineTesterMixin — TODO confirm).
    UpperCamelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    UpperCamelCase__ = (
        {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def snake_case_ ( self):
        # setUp: instantiate the model tester and the config tester.
        A__ = ViTHybridModelTester(self)
        A__ = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=3_7)

    def snake_case_ ( self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def snake_case_ ( self):
        pass

    def snake_case_ ( self):
        # Input embeddings are a module; output embeddings are absent or a Linear head.
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(a__)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            A__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a__ , nn.Linear))

    def snake_case_ ( self):
        # First positional forward argument must be `pixel_values`.
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(a__)
            A__ = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]
            A__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , a__)

    def snake_case_ ( self):
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a__)

    def snake_case_ ( self):
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a__)

    def snake_case_ ( self):
        # Weight-init check: with zeroed init ranges, every trainable parameter
        # (except the backbone's) must round to 0.0 or 1.0.
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = _config_zero_init(a__)
        for model_class in self.all_model_classes:
            A__ = model_class(config=a__)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    A__ = [F"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,
                        [0.0, 1.0] ,
                        msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )

    @slow
    def snake_case_ ( self):
        # from_pretrained smoke test on the first published checkpoint.
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = ViTHybridModel.from_pretrained(a__)
            self.assertIsNotNone(a__)


def lowerCAmelCase__ ( )-> str:
    # prepare_img: load the standard COCO cats fixture image.
    A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    # Slow integration tests against published ViTHybrid checkpoints.

    @cached_property
    def snake_case_ ( self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def snake_case_ ( self):
        A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            a__)
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=a__ , return_tensors='''pt''').to(a__)
        # forward pass
        with torch.no_grad():
            A__ = model(**a__)
        # verify the logits
        A__ = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , a__)
        A__ = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9]).to(a__)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4))

    @slow
    @require_accelerate
    def snake_case_ ( self):
        # device_map='auto' (accelerate) inference path.
        A__ = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
        A__ = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
        A__ = prepare_img()
        A__ = image_processor(images=a__ , return_tensors='''pt''')
        A__ = model(**a__)
        A__ = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        A__ = logits.argmax(-1).item()
        # NOTE(review): `idalabel` is presumably `id2label`, and assertTrue is being
        # passed two arguments (value, msg) where an equality check looks intended —
        # TODO confirm against the upstream test.
        self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''')
526
1
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a__ ( __magic_name__ ): def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : str = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(UpperCamelCase_ , "width_multiplier")) class a__ : def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : str=13 , UpperCamelCase_ : Optional[Any]=64 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Dict="swish" , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Union[str, Any]=10 , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=0.25 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , ): """simple docstring""" __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Union[str, Any] = image_size __UpperCAmelCase : Any = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , 
divisor=8) __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Union[str, Any] = conv_kernel_size __UpperCAmelCase : List[Any] = output_stride __UpperCAmelCase : int = classifier_dropout_prob __UpperCAmelCase : Any = use_labels __UpperCAmelCase : List[Any] = is_training __UpperCAmelCase : Any = num_labels __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Union[str, Any] = scope __UpperCAmelCase : int = width_multiplier __UpperCAmelCase : Optional[int] = ffn_dropout __UpperCAmelCase : Optional[int] = attn_dropout def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : str = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels) __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) __UpperCAmelCase : Any = self.get_config() return config, pixel_values, labels, pixel_labels def a_ ( self : Union[str, Any]): """simple docstring""" return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : str = MobileViTVaModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, 
self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a_ ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : int = MobileViTVaForImageClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Any = model(UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple): """simple docstring""" __UpperCAmelCase : Any = self.num_labels __UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Tuple = model(UpperCamelCase_) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : str = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase_ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowercase_ = ( { "feature-extraction": MobileViTVaModel, "image-classification": 
MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = MobileViTVaModelTester(self) __UpperCAmelCase : List[Any] = MobileViTVaConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="MobileViTV2 does not use inputs_embeds") def a_ ( self : Optional[int]): """simple docstring""" pass @unittest.skip(reason="MobileViTV2 does not support input and output embeddings") def a_ ( self : List[Any]): """simple docstring""" pass @unittest.skip(reason="MobileViTV2 does not output attentions") def a_ ( self : List[Any]): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.") def a_ ( self : str): """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.") def a_ ( self : List[str]): """simple docstring""" pass def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self 
: Optional[int]): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]): __UpperCAmelCase : str = model_class(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() with torch.no_grad(): __UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : Tuple = outputs.hidden_states __UpperCAmelCase : Tuple = 5 self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __UpperCAmelCase : Dict = 2 for i in range(len(UpperCamelCase_)): self.assertListEqual( list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2) __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_) @slow def a_ ( self : Dict): """simple docstring""" for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = 
MobileViTVaModel.from_pretrained(UpperCamelCase_) self.assertIsNotNone(UpperCamelCase_) def _UpperCamelCase ( ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return ( MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256") if is_vision_available() else None ) @slow def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to( UpperCamelCase_) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_) # forward pass with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : str = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : List[str] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01]).to(UpperCamelCase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4)) @slow def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : int = model.to(UpperCamelCase_) __UpperCAmelCase : Dict = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : Optional[Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_) # forward pass with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**UpperCamelCase_) __UpperCAmelCase : Tuple = outputs.logits # 
verify the logits __UpperCAmelCase : int = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape , UpperCamelCase_) __UpperCAmelCase : List[str] = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=UpperCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4)) @slow def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : Union[str, Any] = model.to(UpperCamelCase_) __UpperCAmelCase : int = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : List[str] = prepare_img() __UpperCAmelCase : Dict = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_) # forward pass with torch.no_grad(): __UpperCAmelCase : List[str] = model(**UpperCamelCase_) __UpperCAmelCase : int = outputs.logits.detach().cpu() __UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)]) __UpperCAmelCase : Tuple = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape , UpperCamelCase_) __UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_) __UpperCAmelCase : Tuple = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape , UpperCamelCase_)
77
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class lowerCamelCase__ ( unittest.TestCase ): def __a ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights A = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase ) A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )] A = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class lowerCamelCase__ ( unittest.TestCase ): def __a ( self : Optional[Any] ): A , A = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase ) A = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) A = jax.random.PRNGKey(0 ) A = 4 A = jax.device_count() A = num_samples * [prompt] A = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A = replicate(_lowercase ) A = jax.random.split(_lowercase , _lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, 
:2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3 assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1 A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_lowercase ) == num_samples def __a ( self : Dict ): A , A = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase ) A = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) A = jax.random.PRNGKey(0 ) A = 50 A = jax.device_count() A = num_samples * [prompt] A = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A = replicate(_lowercase ) A = jax.random.split(_lowercase , _lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1 def __a ( self : List[str] ): A , A = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase ) A = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) A = jax.random.PRNGKey(0 ) A = 50 A = jax.device_count() A = num_samples * [prompt] A = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A = replicate(_lowercase ) A = jax.random.split(_lowercase , _lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 
0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1 def __a ( self : str ): A , A = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) A = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) A = jax.random.PRNGKey(0 ) A = 50 A = jax.device_count() A = num_samples * [prompt] A = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A = replicate(_lowercase ) A = jax.random.split(_lowercase , _lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1 def __a ( self : Any ): A = FlaxDDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , ) A , A = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , ) A = scheduler.create_state() A = scheduler_state A = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) A = jax.random.PRNGKey(0 ) A = 50 A = jax.device_count() A = num_samples * [prompt] A = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A = replicate(_lowercase ) A = jax.random.split(_lowercase , _lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape 
== (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1 def __a ( self : List[str] ): A = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) A = jax.device_count() A = num_samples * [prompt] A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase ) A , A = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , ) A = replicate(_lowercase ) A = pipeline.prepare_inputs(_lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) A = images[2, 0, 256, 10:17, 1] # With memory efficient attention A , A = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , ) A = replicate(_lowercase ) A = pipeline.prepare_inputs(_lowercase ) A = shard(_lowercase ) A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) A = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
690
0
"""simple docstring""" import numpy as np def UpperCamelCase_ ( lowerCamelCase : np.array ) -> np.array: """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
147
"""simple docstring""" import functools def UpperCamelCase_ ( lowerCamelCase : str , lowerCamelCase : str ) -> int: """simple docstring""" __magic_name__ : List[str] = len(lowerCamelCase ) __magic_name__ : Dict = len(lowerCamelCase ) @functools.cache def min_distance(lowerCamelCase : int , lowerCamelCase : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa __magic_name__ : Any = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
147
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __a = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, 
WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
97
from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCamelCase ( _a , _a ) -> str | Literal[False]: '''simple docstring''' lowercase_ :str = list(_a ) lowercase_ :Dict = list(_a ) lowercase_ :int = 0 for i in range(len(_a ) ): if lista[i] != lista[i]: count += 1 lowercase_ :List[str] = '''_''' if count > 1: return False else: return "".join(_a ) def UpperCamelCase ( _a ) -> list[str]: '''simple docstring''' lowercase_ :List[Any] = [] while True: lowercase_ :List[Any] = ['''$'''] * len(_a ) lowercase_ :Any = [] for i in range(len(_a ) ): for j in range(i + 1 , len(_a ) ): lowercase_ :Optional[Any] = compare_string(binary[i] , binary[j] ) if k is False: lowercase_ :str = '''*''' lowercase_ :Any = '''*''' temp.append('''X''' ) for i in range(len(_a ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_a ) == 0: return pi lowercase_ :int = list(set(_a ) ) def UpperCamelCase ( _a , _a ) -> list[str]: '''simple docstring''' lowercase_ :Union[str, Any] = [] for minterm in minterms: lowercase_ :str = '''''' for _ in range(_a ): lowercase_ :Any = str(minterm % 2 ) + string minterm //= 2 temp.append(_a ) return temp def UpperCamelCase ( _a , _a , _a ) -> bool: '''simple docstring''' lowercase_ :Dict = list(_a ) lowercase_ :Dict = list(_a ) lowercase_ :Dict = 0 for i in range(len(_a ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCamelCase ( _a , _a ) -> list[str]: '''simple docstring''' lowercase_ :Union[str, Any] = [] lowercase_ :Optional[Any] = [0] * len(_a ) for i in range(len(chart[0] ) ): lowercase_ :int = 0 lowercase_ :List[Any] = -1 for j in range(len(_a ) ): if chart[j][i] == 1: count += 1 lowercase_ :Dict = j if count == 1: lowercase_ :Any = 1 for i in range(len(_a ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_a ) ): lowercase_ :str = 0 temp.append(prime_implicants[i] ) while True: lowercase_ :List[Any] = 0 lowercase_ :Any = -1 
lowercase_ :Any = 0 for i in range(len(_a ) ): lowercase_ :str = chart[i].count(1 ) if count_n > max_n: lowercase_ :List[Any] = count_n lowercase_ :str = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_a ) ): lowercase_ :Dict = 0 def UpperCamelCase ( _a , _a ) -> list[list[int]]: '''simple docstring''' lowercase_ :List[Any] = [[0 for x in range(len(_a ) )] for x in range(len(_a ) )] for i in range(len(_a ) ): lowercase_ :List[Any] = prime_implicants[i].count('''_''' ) for j in range(len(_a ) ): if is_for_table(prime_implicants[i] , binary[j] , _a ): lowercase_ :str = 1 return chart def UpperCamelCase ( ) -> None: '''simple docstring''' lowercase_ :Dict = int(input('''Enter the no. of variables\n''' ) ) lowercase_ :int = [ float(_a ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] lowercase_ :Tuple = decimal_to_binary(_a , _a ) lowercase_ :str = check(_a ) print('''Prime Implicants are:''' ) print(_a ) lowercase_ :Union[str, Any] = prime_implicant_chart(_a , _a ) lowercase_ :int = selection(_a , _a ) print('''Essential Prime Implicants are:''' ) print(_a ) if __name__ == "__main__": import doctest doctest.testmod() main()
257
0
'''simple docstring''' import collections import importlib.util import os import re from pathlib import Path _lowercase : Optional[Any] ="src/transformers" # Matches is_xxx_available() _lowercase : Dict =re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} _lowercase : int =re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _lowercase : List[Any] =re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available _lowercase : str =re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") _lowercase : str =re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _lowercase : Tuple =re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", _lowercase : Optional[Any] =re.compile("^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], _lowercase : Any =re.compile("^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo _lowercase : List[str] =re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: _lowercase : int =re.compile(R"^\s*try:") # Catches a line with else: _lowercase : Tuple =re.compile(R"^\s*else:") def __UpperCAmelCase ( UpperCamelCase__ :Any ) -> List[Any]: if _re_test_backend.search(UpperCamelCase__ ) is None: return None snake_case__ : Tuple = [b[0] for b in _re_backend.findall(UpperCamelCase__ )] backends.sort() return "_and_".join(UpperCamelCase__ ) def __UpperCAmelCase ( UpperCamelCase__ :Any ) -> str: with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: snake_case__ : Optional[Any] = f.readlines() snake_case__ : List[str] = 0 while line_index < len(UpperCamelCase__ ) and not 
lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase__ ): return None # First grab the objects without a specific backend in _import_structure snake_case__ : str = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: snake_case__ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase__ ): snake_case__ : Dict = _re_one_line_import_struct.search(UpperCamelCase__ ).groups()[0] snake_case__ : List[str] = re.findall('''\[([^\]]+)\]''' , UpperCamelCase__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue snake_case__ : Union[str, Any] = _re_import_struct_key_value.search(UpperCamelCase__ ) if single_line_import_search is not None: snake_case__ : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase__ ) > 0] objects.extend(UpperCamelCase__ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 snake_case__ : List[Any] = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case__ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case__ : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case__ : int = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): snake_case__ : Any = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase__ ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase__ ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase__ ) is not None: snake_case__ : Any = _re_import_struct_add_many.search(UpperCamelCase__ ).groups()[0].split(''', ''' ) snake_case__ : Union[str, Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase__ ) > 0] objects.extend(UpperCamelCase__ ) elif _re_between_brackets.search(UpperCamelCase__ ) is not None: snake_case__ : Union[str, Any] = _re_between_brackets.search(UpperCamelCase__ ).groups()[0].split(''', ''' ) snake_case__ : Optional[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase__ ) > 0] objects.extend(UpperCamelCase__ ) elif _re_quote_object.search(UpperCamelCase__ ) is not None: objects.append(_re_quote_object.search(UpperCamelCase__ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 snake_case__ : Optional[int] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case__ : Optional[Any] = [] while ( line_index < len(UpperCamelCase__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): snake_case__ : Dict = 
lines[line_index] snake_case__ : Optional[int] = _re_import.search(UpperCamelCase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case__ : Any = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(UpperCamelCase__ ): # If the line is an if is_backend_available, we grab all objects associated. snake_case__ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case__ : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case__ : List[str] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): snake_case__ : List[str] = lines[line_index] snake_case__ : List[Any] = _re_import.search(UpperCamelCase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case__ : Dict = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __UpperCAmelCase ( UpperCamelCase__ :Any , UpperCamelCase__ :str ) -> Optional[int]: def find_duplicates(UpperCamelCase__ :str ): return [k for k, v in collections.Counter(UpperCamelCase__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case__ : Dict = [] for key in import_dict_objects.keys(): snake_case__ : Tuple = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure 
definitions for: {duplicate_imports}''' ) snake_case__ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case__ : Union[str, Any] = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def __UpperCAmelCase ( ) -> str: snake_case__ : Optional[Any] = [] for root, _, files in os.walk(UpperCamelCase__ ): if "__init__.py" in files: snake_case__ : Tuple = os.path.join(UpperCamelCase__ , '''__init__.py''' ) snake_case__ : Tuple = parse_init(UpperCamelCase__ ) if objects is not None: snake_case__ : List[str] = analyze_results(*UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: snake_case__ : List[Any] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(UpperCamelCase__ ) ) if len(UpperCamelCase__ ) > 0: raise ValueError('''\n\n'''.join(UpperCamelCase__ ) ) def __UpperCAmelCase ( ) -> str: snake_case__ : Tuple = [] for path, directories, files in os.walk(UpperCamelCase__ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(UpperCamelCase__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase__ ) / folder).glob('''*.py''' ) ) ) == 0: continue snake_case__ : Union[str, Any] = str((Path(UpperCamelCase__ ) / folder).relative_to(UpperCamelCase__ ) ) snake_case__ : Any = short_path.replace(os.path.sep , '''.''' ) submodules.append(UpperCamelCase__ ) for 
fname in files: if fname == "__init__.py": continue snake_case__ : Union[str, Any] = str((Path(UpperCamelCase__ ) / fname).relative_to(UpperCamelCase__ ) ) snake_case__ : Optional[int] = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(UpperCamelCase__ ) return submodules _lowercase : int =[ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", ] def __UpperCAmelCase ( ) -> Optional[int]: # This is to make sure the transformers module imported is the one in the repo. snake_case__ : str = importlib.util.spec_from_file_location( '''transformers''' , os.path.join(UpperCamelCase__ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) snake_case__ : Tuple = spec.loader.load_module() snake_case__ : List[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(UpperCamelCase__ ) > 0: snake_case__ : Dict = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
708
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate _lowercase : List[Any] =TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) _lowercase : List[Any] =[] _lowercase : List[str] =[] _lowercase : Tuple ={"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} _lowercase : Union[str, Any] =[ { "type": "header", "text": { "type": "plain_text", "text": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", "emoji": True, }, } ] _lowercase : int =0 for log in Path().glob("*.log"): _lowercase : Tuple =0 with open(log, "r") as f: for line in f: _lowercase : str =json.loads(line) if line.get("nodeid", "") != "": _lowercase : List[Any] =line["nodeid"] if line.get("duration", None) is not None: _lowercase : Optional[int] =F"{line['duration']:.4f}" if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) _lowercase : Optional[int] =[] log.unlink() _lowercase : Optional[int] ="" _lowercase : List[Any] =[] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" _lowercase : Dict =[] _lowercase : Dict ={} for test in failed_tests: _lowercase : Union[str, Any] =test[0].split("::") _lowercase : Dict =data[0].split("/")[-1] if data[0] not in filesafailed: _lowercase : Union[str, Any] =[data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) _lowercase : Dict =[test[0] for test in failed_table] _lowercase : Dict =list(set(files)) # Count number of instances in 
failed_tests _lowercase : Tuple =[] for file in individual_files: table.append([file, len(filesafailed[file])]) _lowercase : Dict =tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: _lowercase : Tuple ="Too many failed tests, please see the full report in the Action results." _lowercase : Optional[int] =len(err) + 10 _lowercase : str =message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: _lowercase : Union[str, Any] ="No failed tests! 🤗" print(F"## {message}") payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient _lowercase : List[str] =WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 🤗": _lowercase : Optional[Any] ={ "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) _lowercase : Union[str, Any] ={ "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) _lowercase : Tuple ={ "type": "context", "elements": [ { "type": "plain_text", "text": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) _lowercase : List[str] =client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) _lowercase : int =response.data["ts"] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name _lowercase : Tuple ="" for i, row in enumerate(test_failures): if row[0] != test_class: _lowercase : List[str] =row[0] else: _lowercase : int ="" _lowercase : 
Tuple ={ "type": "section", "text": { "type": "mrkdwn", "text": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
574
0
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() a__ = logging.get_logger(__name__) a__ = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def __UpperCAmelCase ( __a : Any ) -> Union[str, Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _a : List[Any] = k.replace(__a ,__a ) if k.startswith('''encoder''' ): _a : Dict = k.replace('''.attn''' ,'''.self_attn''' ) _a : Optional[int] = k.replace('''norm1''' ,'''self_attn_layer_norm''' ) _a : Union[str, Any] = k.replace('''norm2''' ,'''final_layer_norm''' ) elif k.startswith('''decoder''' ): _a : int = k.replace('''norm1''' ,'''self_attn_layer_norm''' ) _a : Any = k.replace('''norm2''' ,'''encoder_attn_layer_norm''' ) _a : List[Any] = k.replace('''norm3''' ,'''final_layer_norm''' ) return k def __UpperCAmelCase ( __a : Union[str, Any] ) -> List[Any]: """simple docstring""" _a : List[str] = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _a : List[str] = sd.pop(__a ) _a : Any = k.replace('''layernorm_embedding''' ,'''layer_norm''' ) assert new_k not in sd _a : str = v a__ = ['''START'''] @torch.no_grad() def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> str: """simple docstring""" _a : Optional[Any] = torch.load(__a ,map_location='''cpu''' ) _a : Optional[int] = model['''model'''] _a : Tuple = 
BlenderbotConfig.from_json_file(__a ) _a : Tuple = BlenderbotForConditionalGeneration(__a ) _a : Union[str, Any] = m.model.state_dict().keys() _a : str = [] _a : Optional[int] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _a : str = rename_state_dict_key(__a ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _a : int = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__a ) m.model.load_state_dict(__a ,strict=__a ) m.half() m.save_pretrained(__a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) a__ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
14
"""simple docstring""" import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a=3 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_labels __lowerCAmelCase = num_choices __lowerCAmelCase = scope def snake_case ( self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None 
if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__a , ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = FalconModel(config=__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a ) __lowerCAmelCase = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): __lowerCAmelCase = True __lowerCAmelCase = FalconModel(__a ) model.to(__a ) model.eval() __lowerCAmelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) __lowerCAmelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , ) __lowerCAmelCase = model(__a , attention_mask=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): __lowerCAmelCase = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() 
__lowerCAmelCase = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): __lowerCAmelCase = True __lowerCAmelCase = True __lowerCAmelCase = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() # first forward pass __lowerCAmelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , ) __lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) __lowerCAmelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0] __lowerCAmelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0] # select random slice __lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) ) def snake_case ( self ): __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"input_ids": 
input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : int =( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __UpperCAmelCase : Union[str, Any] =(FalconForCausalLM,) if is_torch_available() else () __UpperCAmelCase : List[Any] =( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase : Any =False __UpperCAmelCase : Tuple =False def snake_case ( self ): __lowerCAmelCase = FalconModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__a , hidden_size=37 ) def snake_case ( self ): self.config_tester.run_common_tests() def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def snake_case ( self ): __lowerCAmelCase , *__lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __lowerCAmelCase = alibi self.model_tester.create_and_check_model(__a , *__a ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = 3 __lowerCAmelCase = input_dict["input_ids"] __lowerCAmelCase = input_ids.ne(1 ).to(__a ) __lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowerCAmelCase = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , labels=__a ) 
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = 3 __lowerCAmelCase = "single_label_classification" __lowerCAmelCase = input_dict["input_ids"] __lowerCAmelCase = input_ids.ne(1 ).to(__a ) __lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowerCAmelCase = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = input_dict["input_ids"] __lowerCAmelCase = FalconForCausalLM(__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , use_cache=__a ) __lowerCAmelCase = input_ids.shape[0] __lowerCAmelCase = model._convert_to_rw_cache(result.past_key_values ) __lowerCAmelCase = model._convert_cache_to_standard_format(__a , __a ) for layer in range(len(__a ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def snake_case ( self ): __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = 3 __lowerCAmelCase = "multi_label_classification" __lowerCAmelCase = input_dict["input_ids"] __lowerCAmelCase = input_ids.ne(1 ).to(__a ) __lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __lowerCAmelCase = FalconForSequenceClassification(__a ) model.to(__a ) 
model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self ): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__a , "use_cache" ): return __lowerCAmelCase = model_class(__a ).to(__a ) if "use_cache" not in inputs: __lowerCAmelCase = True __lowerCAmelCase = model(**__a ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __lowerCAmelCase = ( getattr(__a , "decoder_layers" , __a ) or getattr(__a , "num_decoder_layers" , __a ) or config.num_hidden_layers ) __lowerCAmelCase = getattr(__a , "num_kv_heads" , config.num_attention_heads ) __lowerCAmelCase = getattr(__a , "d_model" , config.hidden_size ) __lowerCAmelCase = embed_dim // num_attention_heads __lowerCAmelCase = outputs["past_key_values"] self.assertEqual(len(__a ) , __a ) __lowerCAmelCase , __lowerCAmelCase = inputs["input_ids"].shape for i in range(__a ): if config.new_decoder_architecture: __lowerCAmelCase = config.num_attention_heads elif config.multi_query: __lowerCAmelCase = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self ): __lowerCAmelCase = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" ) __lowerCAmelCase = 
FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" ) model.eval() model.to(__a ) __lowerCAmelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) __lowerCAmelCase = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." ) __lowerCAmelCase = model.generate(**__a , do_sample=__a , max_new_tokens=19 ) __lowerCAmelCase = tokenizer.batch_decode(__a )[0] self.assertEqual(__a , __a ) @slow def snake_case ( self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __lowerCAmelCase = AutoTokenizer.from_pretrained(__a ) __lowerCAmelCase = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(__a ) __lowerCAmelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , num_beams=2 , max_new_tokens=4 ) @slow def snake_case ( self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __lowerCAmelCase = AutoTokenizer.from_pretrained(__a ) __lowerCAmelCase = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(device=__a ) __lowerCAmelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) # Test results are the same with and without cache __lowerCAmelCase = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) __lowerCAmelCase = 
model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
636
0
from collections.abc import Sequence


def SCREAMING_SNAKE_CASE__(lowerCamelCase__=None) -> int:
    """Return the maximum sum of any non-empty contiguous subsequence.

    Implements Kadane's algorithm in O(n) time and O(1) extra space.

    :param lowerCamelCase__: sequence of numbers (must be non-empty)
    :raises ValueError: if the input is ``None`` or empty

    >>> SCREAMING_SNAKE_CASE__([2, 8, 9])
    19
    >>> SCREAMING_SNAKE_CASE__([4, -1, 2, 1])
    6
    >>> SCREAMING_SNAKE_CASE__([-1, -2, -3])
    -1
    """
    if lowerCamelCase__ is None or not lowerCamelCase__:
        raise ValueError('Input sequence should not be empty')
    # `current` is the best sum of a subsequence ending at the current element;
    # `best` is the best sum seen anywhere so far.
    best = current = lowerCamelCase__[0]
    for num in lowerCamelCase__[1:]:
        current = max(num, current + num)
        best = max(best, current)
    return best


# Backward-compatible public alias: the interactive driver below (and external
# callers) use the readable name.
max_subsequence_sum = SCREAMING_SNAKE_CASE__


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user
    n = int(input("""Enter number of elements : """).strip())
    array = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
    print(max_subsequence_sum(array))
337
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    """Check the status of the given self-hosted GitHub Actions runners.

    Queries the GitHub REST API for the repository's runners, collects those in
    `target_runners` whose status is "offline", writes them to
    ``offline_runners.txt`` (so they can be reported on Slack), and raises if
    any were found.

    :param target_runners: list of runner names to check
    :param token: GitHub token with ``actions:read`` permission
    :raises ValueError: if at least one targeted runner is offline
    """
    offline_runners = []
    # Use an argument list (shell=False) so the token is never interpolated
    # into a shell command line.
    output = subprocess.run(
        [
            "curl",
            "-H",
            "Accept: application/vnd.github+json",
            "-H",
            f"Authorization: Bearer {token}",
            "https://api.github.com/repos/huggingface/transformers/actions/runners",
        ],
        stdout=subprocess.PIPE,
    )
    o = output.stdout.decode('utf-8')
    status = json.loads(o)
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        # argparse `type=` hook: split a comma-separated string into a list.
        return values.split(',')

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
337
1
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt') def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16000 ): '''simple docstring''' UpperCAmelCase__ : int = int(round(sample_rate * max_length ) ) if len(__UpperCamelCase ) <= sample_length: return wav UpperCAmelCase__ : List[str] = randint(0 , len(__UpperCamelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __lowercase : snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} ) snake_case_ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the 
datasets library). Defaults to 'train'""" } , ) snake_case_ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) snake_case_ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) snake_case_ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} ) snake_case_ = field( default=__lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) snake_case_ = field( default=__lowerCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) snake_case_ = field( default=2_0 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class __lowercase : snake_case_ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} ) snake_case_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} ) 
snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} ) snake_case_ = field( default=__lowerCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __lowercase ( self : Dict ): '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" ,A ,) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. 
The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_audio_classification""" , __UpperCamelCase , __UpperCamelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase__ : Any = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. UpperCAmelCase__ : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. UpperCAmelCase__ : Union[str, Any] = DatasetDict() UpperCAmelCase__ : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase__ : Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. " """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"{', '.join(raw_datasets['train'].column_names )}." ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " """Make sure to set `--label_column_name` to the correct text column - one of """ F"{', '.join(raw_datasets['train'].column_names )}." ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
UpperCAmelCase__ : List[Any] = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) UpperCAmelCase__ : Dict = feature_extractor.model_input_names[0] def train_transforms(__UpperCamelCase ): UpperCAmelCase__ : List[str] = [] for audio in batch[data_args.audio_column_name]: UpperCAmelCase__ : Dict = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(__UpperCamelCase ) UpperCAmelCase__ : List[str] = feature_extractor(__UpperCamelCase , sampling_rate=feature_extractor.sampling_rate ) UpperCAmelCase__ : List[str] = {model_input_name: inputs.get(__UpperCamelCase )} UpperCAmelCase__ : Tuple = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__UpperCamelCase ): UpperCAmelCase__ : str = [audio["""array"""] for audio in batch[data_args.audio_column_name]] UpperCAmelCase__ : List[Any] = feature_extractor(__UpperCamelCase , sampling_rate=feature_extractor.sampling_rate ) UpperCAmelCase__ : Dict = {model_input_name: inputs.get(__UpperCamelCase )} UpperCAmelCase__ : List[str] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. UpperCAmelCase__ : Any = raw_datasets["""train"""].features[data_args.label_column_name].names UpperCAmelCase__ , UpperCAmelCase__ : List[str] = {}, {} for i, label in enumerate(__UpperCamelCase ): UpperCAmelCase__ : Any = str(__UpperCamelCase ) UpperCAmelCase__ : int = label # Load the accuracy metric from the datasets package UpperCAmelCase__ : Any = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(__UpperCamelCase ): UpperCAmelCase__ : Dict = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=__UpperCamelCase , references=eval_pred.label_ids ) UpperCAmelCase__ : Any = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__UpperCamelCase ) , labelaid=__UpperCamelCase , idalabel=__UpperCamelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase__ : List[Any] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: UpperCAmelCase__ : Optional[int] = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(__UpperCamelCase , output_all_columns=__UpperCamelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: UpperCAmelCase__ : Tuple = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(__UpperCamelCase , output_all_columns=__UpperCamelCase ) # Initialize our trainer UpperCAmelCase__ : Optional[Any] = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if 
training_args.do_eval else None , compute_metrics=__UpperCamelCase , tokenizer=__UpperCamelCase , ) # Training if training_args.do_train: UpperCAmelCase__ : Dict = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase__ : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase__ : str = last_checkpoint UpperCAmelCase__ : str = trainer.train(resume_from_checkpoint=__UpperCamelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCAmelCase__ : int = trainer.evaluate() trainer.log_metrics("""eval""" , __UpperCamelCase ) trainer.save_metrics("""eval""" , __UpperCamelCase ) # Write model card and (optionally) push to hub UpperCAmelCase__ : List[str] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCamelCase ) else: trainer.create_model_card(**__UpperCamelCase ) if __name__ == "__main__": main()
65
"""simple docstring""" __UpperCAmelCase = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image', 'mask_image']) __UpperCAmelCase = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset(['input_tokens']) __UpperCAmelCase = frozenset(['input_tokens'])
65
1
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class _UpperCamelCase : def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str=13 , _SCREAMING_SNAKE_CASE: Union[str, Any]=7 , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: str=True , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: Optional[Any]=99 , _SCREAMING_SNAKE_CASE: str=[1, 1, 2] , _SCREAMING_SNAKE_CASE: Optional[int]=1 , _SCREAMING_SNAKE_CASE: Tuple=32 , _SCREAMING_SNAKE_CASE: Optional[int]=4 , _SCREAMING_SNAKE_CASE: Optional[int]=8 , _SCREAMING_SNAKE_CASE: Optional[int]=37 , _SCREAMING_SNAKE_CASE: Optional[Any]="gelu_new" , _SCREAMING_SNAKE_CASE: Optional[Any]=0.1 , _SCREAMING_SNAKE_CASE: List[str]=0.1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: int=512 , _SCREAMING_SNAKE_CASE: List[str]=3 , _SCREAMING_SNAKE_CASE: List[Any]=0.02 , _SCREAMING_SNAKE_CASE: Union[str, Any]=3 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: List[str]=False , ) -> Any: """simple docstring""" UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_input_mask UpperCamelCase_ = use_token_type_ids UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = block_sizes UpperCamelCase_ = num_decoder_layers 
UpperCamelCase_ = d_model UpperCamelCase_ = n_head UpperCamelCase_ = d_head UpperCamelCase_ = d_inner UpperCamelCase_ = hidden_act UpperCamelCase_ = hidden_dropout UpperCamelCase_ = attention_dropout UpperCamelCase_ = activation_dropout UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = type_vocab_size UpperCamelCase_ = 2 UpperCamelCase_ = num_labels UpperCamelCase_ = num_choices UpperCamelCase_ = scope UpperCamelCase_ = initializer_std # Used in the tests to check the size of the first attention layer UpperCamelCase_ = n_head # Used in the tests to check the size of the first hidden state UpperCamelCase_ = self.d_model # Used in the tests to check the number of output hidden states/attentions UpperCamelCase_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: UpperCamelCase_ = self.num_hidden_layers + 2 def lowercase ( self: str ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = None if self.use_input_mask: UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase_ = None if self.use_token_type_ids: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase_ = None UpperCamelCase_ = None UpperCamelCase_ = None if self.use_labels: UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase_ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , 
d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , ) -> List[str]: """simple docstring""" UpperCamelCase_ = TFFunnelModel(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = [input_ids, input_mask] UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) UpperCamelCase_ = False UpperCamelCase_ = TFFunnelModel(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) UpperCamelCase_ = False UpperCamelCase_ = TFFunnelModel(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , ) -> List[str]: """simple docstring""" UpperCamelCase_ = 
TFFunnelBaseModel(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = [input_ids, input_mask] UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) UpperCamelCase_ = False UpperCamelCase_ = TFFunnelBaseModel(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) UpperCamelCase_ = False UpperCamelCase_ = TFFunnelBaseModel(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , ) -> Any: """simple docstring""" UpperCamelCase_ = TFFunnelForPreTraining(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> str: """simple docstring""" UpperCamelCase_ = TFFunnelForMaskedLM(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": 
token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , ) -> List[Any]: """simple docstring""" UpperCamelCase_ = self.num_labels UpperCamelCase_ = TFFunnelForSequenceClassification(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str , ) -> Union[str, Any]: """simple docstring""" UpperCamelCase_ = self.num_choices UpperCamelCase_ = TFFunnelForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase_ = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase_ = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , 
_SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , ) -> Any: """simple docstring""" UpperCamelCase_ = self.num_labels UpperCamelCase_ = TFFunnelForTokenClassification(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , ) -> str: """simple docstring""" UpperCamelCase_ = TFFunnelForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase ( self: List[str] ) -> List[Any]: """simple docstring""" UpperCamelCase_ = self.prepare_config_and_inputs() ( ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ) = config_and_inputs UpperCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _UpperCamelCase : List[str] = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) _UpperCamelCase : List[Any] = ( { 
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel), '''fill-mask''': TFFunnelForMaskedLM, '''question-answering''': TFFunnelForQuestionAnswering, '''text-classification''': TFFunnelForSequenceClassification, '''token-classification''': TFFunnelForTokenClassification, '''zero-shot''': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : Any = False _UpperCamelCase : Union[str, Any] = False def lowercase ( self: str ) -> Tuple: """simple docstring""" UpperCamelCase_ = TFFunnelModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE ) def lowercase ( self: Tuple ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def lowercase ( self: Union[str, Any] ) -> int: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def lowercase ( self: List[str] ) -> Dict: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE ) def lowercase ( self: int ) -> Dict: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def lowercase ( self: int ) -> Union[str, Any]: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE ) def lowercase ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE ) @require_tf class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): _UpperCamelCase : str = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) 
_UpperCamelCase : Optional[Any] = False _UpperCamelCase : Any = False def lowercase ( self: Optional[int] ) -> List[str]: """simple docstring""" UpperCamelCase_ = TFFunnelModelTester(self , base=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE ) def lowercase ( self: Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def lowercase ( self: Any ) -> str: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*_SCREAMING_SNAKE_CASE ) def lowercase ( self: Dict ) -> List[Any]: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def lowercase ( self: Union[str, Any] ) -> Dict: """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
371
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCamelCase : @staticmethod def lowercase ( *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Dict ) -> Union[str, Any]: """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class _UpperCamelCase ( unittest.TestCase ): _UpperCamelCase : str = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Any: """simple docstring""" UpperCamelCase_ = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCamelCase_ = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = object_detector(examples[0] , threshold=0.0 ) UpperCamelCase_ = len(_SCREAMING_SNAKE_CASE ) self.assertGreater(_SCREAMING_SNAKE_CASE , 0 ) self.assertEqual( _SCREAMING_SNAKE_CASE , [ { "score": ANY(_SCREAMING_SNAKE_CASE ), "label": ANY(_SCREAMING_SNAKE_CASE ), "box": {"xmin": ANY(_SCREAMING_SNAKE_CASE ), "ymin": ANY(_SCREAMING_SNAKE_CASE ), "xmax": ANY(_SCREAMING_SNAKE_CASE ), "ymax": ANY(_SCREAMING_SNAKE_CASE )}, } for i in range(_SCREAMING_SNAKE_CASE ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def lowercase ( self: Tuple ) -> List[str]: """simple docstring""" pass @require_torch def lowercase ( self: Union[str, Any] ) -> str: """simple docstring""" 
UpperCamelCase_ = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCamelCase_ = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] , ) UpperCamelCase_ = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, 
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ] , ) @require_torch @slow def lowercase ( self: List[str] ) -> List[str]: """simple docstring""" UpperCamelCase_ = pipeline("zero-shot-object-detection" ) UpperCamelCase_ = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ] , ) UpperCamelCase_ = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, 
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def lowercase ( self: List[Any] ) -> str: """simple docstring""" pass @require_torch @slow def lowercase ( self: Any ) -> int: """simple docstring""" UpperCamelCase_ = 0.2 UpperCamelCase_ = pipeline("zero-shot-object-detection" ) UpperCamelCase_ = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=_SCREAMING_SNAKE_CASE , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ] , ) @require_torch @slow def lowercase ( self: Dict ) -> Tuple: """simple docstring""" UpperCamelCase_ = 2 UpperCamelCase_ = pipeline("zero-shot-object-detection" ) UpperCamelCase_ = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=_SCREAMING_SNAKE_CASE , ) self.assertEqual( nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.2_77, "label": "remote", "box": 
{"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ] , )
371
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __UpperCamelCase = logging.get_logger(__name__) @dataclass class lowerCAmelCase ( lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self , **lowerCAmelCase__ ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE = deprecated_arg[3:] setattr(self , _UpperCamelCase , not kwargs.pop(_UpperCamelCase ) ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) SCREAMING_SNAKE_CASE = kwargs.pop('torchscript' , self.torchscript ) SCREAMING_SNAKE_CASE = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics ) SCREAMING_SNAKE_CASE = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level ) super().__init__(**_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : bool = field(default=lowercase__ , metadata={"""help""": """Trace the models using torchscript"""} ) SCREAMING_SNAKE_CASE_ : bool = field(default=lowercase__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} ) SCREAMING_SNAKE_CASE_ : str = field( default="""O1""" , metadata={ """help""": ( """For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. 
""" """See details at https://nvidia.github.io/apex/amp.html""" ) } , ) @cached_property def __A ( self ) -> int: requires_backends(self , ['torch'] ) logger.info('PyTorch: setting up devices' ) if not self.cuda: SCREAMING_SNAKE_CASE = torch.device('cpu' ) SCREAMING_SNAKE_CASE = 0 elif is_torch_tpu_available(): SCREAMING_SNAKE_CASE = xm.xla_device() SCREAMING_SNAKE_CASE = 0 else: SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) SCREAMING_SNAKE_CASE = torch.cuda.device_count() return device, n_gpu @property def __A ( self ) -> Optional[Any]: return is_torch_tpu_available() and self.tpu @property def __A ( self ) -> Tuple: requires_backends(self , ['torch'] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def __A ( self ) -> Union[str, Any]: requires_backends(self , ['torch'] ) return self._setup_devices[0] @property def __A ( self ) -> List[str]: requires_backends(self , ['torch'] ) return self._setup_devices[1] @property def __A ( self ) -> Dict: return self.n_gpu > 0
247
'''simple docstring''' import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __UpperCamelCase ( lowercase__ ): def __init__( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Any=1_3 ,_UpperCamelCase :Tuple=7 ,_UpperCamelCase :List[str]=True ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Optional[int]=False ,_UpperCamelCase :Any=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :int=3_2 ,_UpperCamelCase :int=5 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[Any]=6_4 ,_UpperCamelCase :Optional[int]="gelu" ,_UpperCamelCase :List[Any]=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=5_1_2 ,_UpperCamelCase :Any=1_6 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Optional[int]=3 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :int=None ,_UpperCamelCase :int=2 ,_UpperCamelCase :Optional[Any]=2 ,_UpperCamelCase :Optional[Any]=2 ,_UpperCamelCase :Any=2 ,_UpperCamelCase :int=4 ,_UpperCamelCase :Tuple=1 ,): snake_case_ : List[Any] = parent snake_case_ : List[str] = batch_size snake_case_ : List[Any] = seq_length snake_case_ : Optional[int] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : Union[str, Any] = use_token_type_ids snake_case_ : Tuple = use_labels snake_case_ : Optional[Any] = vocab_size snake_case_ : Any = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : 
str = num_attention_heads snake_case_ : List[Any] = intermediate_size snake_case_ : Dict = hidden_act snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : Optional[Any] = type_vocab_size snake_case_ : Optional[Any] = type_sequence_label_size snake_case_ : Optional[Any] = initializer_range snake_case_ : int = num_labels snake_case_ : Dict = num_choices snake_case_ : Optional[Any] = scope snake_case_ : str = q_groups snake_case_ : Any = k_groups snake_case_ : Any = v_groups snake_case_ : Tuple = post_attention_groups snake_case_ : Dict = intermediate_groups snake_case_ : int = output_groups def a__ ( self :Tuple ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : str = None if self.use_input_mask: snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : Any = None snake_case_ : Any = None snake_case_ : Any = None if self.use_labels: snake_case_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices ) snake_case_ : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self :Optional[int] ): return SqueezeBertConfig( embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups 
,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,) def a__ ( self :Dict ,_UpperCamelCase :Tuple ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :str ): snake_case_ : List[Any] = SqueezeBertModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : Optional[Any] = model(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : List[str] = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self :List[str] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = SqueezeBertForMaskedLM(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : Union[str, Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self :str ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Dict ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = SqueezeBertForQuestionAnswering(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : str = model( _UpperCamelCase ,attention_mask=_UpperCamelCase ,start_positions=_UpperCamelCase ,end_positions=_UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def a__ ( self :str ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase 
:List[str] ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Dict = self.num_labels snake_case_ : Any = SqueezeBertForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : Tuple = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def a__ ( self :List[str] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Dict ,_UpperCamelCase :Optional[Any] ): snake_case_ : Dict = self.num_labels snake_case_ : List[str] = SqueezeBertForTokenClassification(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : str = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self :Dict ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :str ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Tuple ): snake_case_ : Tuple = self.num_choices snake_case_ : int = SqueezeBertForMultipleChoice(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() snake_case_ : Dict = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() snake_case_ : Any = model( _UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.prepare_config_and_inputs() ((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) : int = config_and_inputs snake_case_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, 
inputs_dict @require_torch class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase : str = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) lowercase : str = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) lowercase : Optional[Any] = False lowercase : Tuple = True lowercase : List[str] = False def a__ ( self :Optional[Any] ): snake_case_ : Optional[int] = SqueezeBertModelTester(self ) snake_case_ : Dict = ConfigTester(self ,config_class=_UpperCamelCase ,dim=3_7 ) def a__ ( self :List[str] ): self.config_tester.run_common_tests() def a__ ( self :Any ): snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_UpperCamelCase ) def a__ ( self :Union[str, Any] ): snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCamelCase ) def a__ ( self :List[Any] ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCamelCase ) def a__ ( self :Tuple ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCamelCase ) def a__ ( self :List[str] ): snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCamelCase ) def a__ ( self :int ): snake_case_ : 
Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCamelCase ) @slow def a__ ( self :int ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = SqueezeBertModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :str ): snake_case_ : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) snake_case_ : str = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] ) snake_case_ : Optional[int] = model(_UpperCamelCase )[0] snake_case_ : List[Any] = torch.Size((1, 3) ) self.assertEqual(output.shape ,_UpperCamelCase ) snake_case_ : int = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] ) self.assertTrue(torch.allclose(_UpperCamelCase ,_UpperCamelCase ,atol=1E-4 ) )
334
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols it exports; consumed by _LazyModule below.
# Bug fix: this dict (and the modeling list further down) were bound to a
# throwaway name while `_import_structure` was referenced, so importing the
# package raised NameError and the modeling classes were never exported.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

# The modeling symbols require torch; register them only when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling classes are simply not exported
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule on first attribute access.
    # NOTE(review): the original bound the proxy to a variable, which leaves the
    # real (empty) module in sys.modules — restored to the standard pattern.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming.

    Args:
        upper_limit: index of the last Catalan number to compute (inclusive).

    Returns:
        List of ``upper_limit + 1`` Catalan numbers.

    Raises:
        ValueError: if ``upper_limit`` is negative.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    """
    # Bug fixes: the body read a name that was not the parameter (NameError),
    # and the inner loop ran over the full limit instead of `i`, corrupting the
    # recurrence via negative indices.
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
37
1
from __future__ import annotations

# Crawler that harvests e-mail addresses from pages linked off a start URL.
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    """HTML parser that records every unique link found in anchor tags."""

    def __init__(self, domain: str) -> None:
        super().__init__()
        # Bug fix: these were bound to locals, so self.urls / self.domain
        # raised AttributeError when the parser was fed.
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect the href of each anchor, resolved against the domain."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor a bare fragment.
                if name == "href" and value != "#" and value != "":
                    # Only record URLs we have not seen yet.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registrable domain, e.g. 'github.com' for 'sub.github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (host) of ``url``."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl ``url`` and return a sorted list of e-mail addresses on its domain.

    Fetches the start page, follows every anchor found there, and greps each
    linked page for addresses of the form ``name@<domain>``.
    """
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    valid_emails = set()
    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        for link in parser.urls:
            try:
                read = requests.get(link)
                # Get the valid email.
                # NOTE(review): requests failures raise RequestException, not
                # ValueError — these handlers likely never fire; confirm intent.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
228
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are nth powers.

    Args:
        max_base: exclusive upper bound for the base (bases 1 .. max_base - 1).
        max_power: exclusive upper bound for the power (powers 1 .. max_power - 1).

    Returns:
        Number of pairs (base, power) with ``len(str(base ** power)) == power``.
    """
    # Bug fix: both parameters carried the same obfuscated name (SyntaxError)
    # and the function name no longer matched the `solution(...)` call below.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
228
1
'''simple docstring'''

import os
import re
import shutil
import sys
import tempfile
import unittest

import black

# Root of the git checkout; lets us import the repo's utility scripts.
# Bug fix: this was assigned to a throwaway name while `git_repo_path` was read
# below, and the reference string likewise never reached `REFERENCE_CODE`.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'


class lowerCamelCase(unittest.TestCase):
    """Tests for the `# Copied from ...` checker in utils/check_copies.py."""

    def setUp(self):
        # Work inside a throwaway tree so the checker never touches real sources.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        # NOTE(review): redirecting the checker's search path — the original
        # assignment target was mangled; confirm the attribute name.
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Format `class_code`, write it into the temp tree, run the checker."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): target version was obfuscated (`PYaa`); PY37 assumed.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
715
'''simple docstring'''

import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available

if is_transformers_available():
    import transformers


def __lowerCamelCase(seed: int):
    """Seed python, numpy and torch RNGs for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential moving average of model parameters.

    NOTE(review): the class name is grounded by the original
    ``-> "EMAModel"`` annotation on the classmethod constructor; every
    attribute assignment below was restored from the mangled locals that
    shadowed ``self.*`` (the methods read e.g. ``self.shadow_params``,
    ``self.decay`` which were otherwise never set), and the colliding
    method names were restored so each method survives class creation.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        # Shadow copies tracked by the moving average.
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        """Alternate constructor: rebuild the EMA state saved alongside a model."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Persist the EMA weights as a regular model checkpoint."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the given optimization step."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters toward the current `parameters`."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        # NOTE(review): the obfuscated source read `is_deepspeed_zeroa_enabled`;
        # ZeRO stage 3 assumed.
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the averaged weights into `parameters` in place."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move/cast the shadow parameters (non-float tensors keep their dtype)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return all EMA state needed to resume, including shadow weights."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Stash a CPU copy of `parameters` so they can be `restore()`d later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Write the `store()`d weights back into `parameters`."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Validate and load EMA state produced by `state_dict()`."""
        # Deepcopy so the caller's dict is never mutated.
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
687
0
'''simple docstring'''

import os
from pathlib import Path


def UpperCamelCase_():
    """Compile and import the MultiScaleDeformableAttention CUDA/CPU kernels.

    Returns:
        The imported ``MultiScaleDeformableAttention`` extension module.
    """
    from torch.utils.cpp_extension import load

    # Bug fix: the kernel root was computed from an undefined name; it must be
    # derived from this file's own location (.../kernels/deformable_detr).
    # Also fixed: `with_cuda=` referenced an undefined name (True intended,
    # matching the -DWITH_CUDA flag below), and the dangling `Optional[int]`
    # return annotation raised NameError at definition time.
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
244
'''simple docstring'''

from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup  # bug fix: the import named a nonexistent module
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse a profile-page <script> tag into the user's GraphQL data dict."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Lazy view over a public Instagram profile's GraphQL data.

    All attribute names are grounded by the call sites in the test and the
    `__main__` block below; the constructor previously bound locals instead of
    `self.url` / `self.user_data`, and every property shared one mangled name.
    """

    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the user dict from its scripts."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            # Page layout varies; fall back to the previous script slot.
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    """Smoke-test the crawler against a stable public profile (skipped on CI)."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
244
1
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR gate: return 1 if either input is 1, else 0.

    Bug fixes: both parameters carried the same obfuscated name (SyntaxError),
    the body counted a single variable twice, and both functions below shared
    one name while the call sites use `or_gate`.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
208
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

# Bug fix: this RNG was bound to a throwaway name while `global_rng` is read
# inside floats_list below.
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters shared by the AST feature-extractor tests.

    The attribute names are grounded by the `self.*` reads in the methods; the
    obfuscated source bound them all to one local name, so the instance was
    never configured.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # NOTE(review): the obfuscated source collapsed both float dtypes to
        # `floataa`; float64 input / float32 output assumed from the test name.
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
208
1
'''simple docstring'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Compute the Hubble parameter H(z) in the Lambda-CDM model.

    The curvature density is derived from the other relative densities so the
    total equals one. Bug fix: all five parameters carried one duplicated
    obfuscated name (SyntaxError) and the function name no longer matched the
    `hubble_parameter(...)` keyword call below.

    Args:
        hubble_constant: present-day Hubble constant H0.
        radiation_density: relative radiation density (0..1).
        matter_density: relative matter density (0..1).
        dark_energy: relative dark-energy density (0..1).
        redshift: redshift z (>= 0).

    Returns:
        H(z) in the same units as ``hubble_constant``.

    Raises:
        ValueError: if any input is negative, or any relative density
            exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
22
"""Convert original Donut checkpoints to the HuggingFace VisionEncoderDecoder format."""

import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    """Build the HF encoder (Swin) and decoder (MBart) configs from the original Donut config."""
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(
            model.decoder.tokenizer
        ),  # vocab size follows the original tokenizer
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    """Map a single original state-dict key to its HuggingFace equivalent."""
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    """Rename keys and split fused qkv projections into query/key/value tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            # per-head size of the target attention module
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert *model_name*, verify outputs against the original, optionally save/push."""
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis,
        size=original_model.config.input_size[::-1],
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")

    prompt_tensor = original_model.decoder.tokenizer(
        task_prompt, add_special_tokens=False, return_tensors="pt"
    )["input_ids"]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
222
0
"""Count the ways to assign N distinct tasks to M persons using bitmask DP."""

from collections import defaultdict


class AssignmentUsingBitmask:
    """
    Memoized bitmask DP: dp[mask][task_no] = number of ways to hand out tasks
    task_no..N so that exactly the persons missing from `mask` receive one.
    """

    def __init__(self, task_performed, total):
        # total no of tasks (N)
        self.total_tasks = total

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        # stores the list of persons for each task
        self.task = defaultdict(list)

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Recursively count assignments from task `task_no` on, given person set `mask`."""
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the task one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Fill the task->persons index and return the total number of assignments."""
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
711
"""Unit and integration tests for the CycleDiffusion pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs fast on CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for the given device/seed."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
363
0
"""Generate all permutations of a list with Heap's algorithm."""


def heaps(arr: list) -> list:
    """
    Return every permutation of *arr* as a list of tuples, using Heap's
    iterative swap scheme (the input list is permuted in place while
    generating, ending in some permutation of its original order).
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        # k == 1: current arrangement is complete; snapshot it
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap element i with the last
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first element with the last
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
523
"""Dataset input stream backed by a user-provided generator function."""

from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Wrap *generator* in a `Generator` builder; remaining kwargs mirror the base class."""
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Return the "train" split, streamed or fully prepared depending on `self.streaming`."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            # no download configuration is needed for an in-process generator
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
523
1
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    """Each `.bin` weight file must have a matching `.safetensors` sibling for compatibility."""

    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # non-variant safetensors still satisfy a variant query
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
186
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return the list of all primes below *limit* via an odd-only sieve."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # mark multiples of every odd number >= 3; marking multiples of odd
    # composites is redundant but harmless
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """
    Project Euler 50: return the prime below *ceiling* that is the sum of the
    most consecutive primes.

    `length` only grows, so the inner loop starts at `i + length` — shorter
    runs can never beat the current best.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of scanning the list

    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
186
1
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = '\\n\n'

_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'

_KWARGS_DESCRIPTION = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Perplexity metric for causal language models."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # NOTE(review): exp2 exponentiates a natural-log cross-entropy, so this is
            # 2^(mean NLL), not e^(mean NLL) — later versions of this metric use
            # torch.exp. Kept as-is to preserve existing behavior; confirm intent.
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
479
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    """The mockfs fixture registers a 'mock' protocol alongside the built-in ones."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    """Without the fixture, 'mock' must not be registered but 'bz2' still is."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    """extract_path_from_uri strips the scheme from remote URIs and leaves local paths alone."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    # Local paths are returned unchanged.
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    """A mock remote fs is detected as remote; the local 'file' fs is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    """Each compression filesystem exposes the decompressed member and its content.

    NOTE(review): fixture-to-protocol mapping assumes the standard datasets test
    fixtures; lz4/zstd fixtures may be None when the optional deps are missing.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # The optional compression library is not installed: skip with its reason.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    # The exposed member is the archive name without its compression extension.
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained URLs (protocol://member::archive) resolve members as files."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """HfFileSystem lists, stats and reads files of a Hub dataset repo."""
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    """Re-registering an existing protocol makes datasets.filesystems warn on reload."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload; clobber=True replaces the existing registration.
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
479
1
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    """Parse a pytest summary line into (n_failed, n_passed, time_spent).

    `test_results` is the last line of a pytest run, e.g.
    "== 2 failed, 10 passed in 0:01:23 ==".
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message.

    Scans pytest's `failures_short` report: a "_ [doctest]" header starts a
    failure section; the first non-line-numbered line after it is the error.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    """Builds and posts the Slack summary of a doc-test CI run."""

    def __init__(self, title, doc_test_results):
        self.title = title
        # Keep only the first comma-separated chunk, e.g. "0:01:23".
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by post(); post_reply() needs it to thread its messages.
        self.thread_ts = None

    @property
    def time(self):
        """Total run time formatted as '{h}h{m}m{s}s'."""
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self):
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self):
        """One mrkdwn section listing failed tests grouped by doc category."""
        line_length = 40
        # Category entries are dicts ({"failed": [...], "failures": {...}});
        # scalar entries like "success"/"time_spent" are skipped.
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self):
        """JSON-encoded list of Slack blocks for the top-level message."""
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic 'tests could not run' message to Slack."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a list of blocks, so dump it directly
        # (json.loads on a list would raise TypeError).
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the summary message and remember its thread timestamp."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details."""
        failures_text = ""
        for key, value in failures.items():
            # Keep messages under Slack's per-section limits.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per category that has failures."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                # Be gentle with the Slack API rate limits.
                time.sleep(1)


def get_job_links():
    """Return {job name: html url} for all jobs of the current GitHub Actions run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The API pages at 100 jobs; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    """Read every text file inside the artifact directory `name` into a dict keyed by stem."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    """Return {artifact name: Artifact} for every directory in the CWD."""

    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
164
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class lowerCamelCase(BenchmarkArguments):
    """PyTorch-specific benchmark arguments.

    Extends the shared ``BenchmarkArguments`` with torchscript tracing, TPU
    metric printing and the Apex fp16 optimization level, and translates the
    deprecated ``no_*`` boolean flags into their positive counterparts.
    """

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Map legacy ``no_X`` kwargs to ``X = not value``, then defer to the dataclass init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Pick the benchmark device once and cache (device, n_gpu)."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        # assumes `self.tpu` is provided by BenchmarkArguments — TODO confirm
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
164
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Sentencepiece is unavailable: no slow tokenizer to convert from/to.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class A(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by the HuggingFace *tokenizers* library.

    Mirrors the sentencepiece-based slow tokenizer; adds [CLS]/[SEP] framing
    and supports saving the slow vocabulary when a spiece model is available.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain-string special tokens as non-stripping AddedTokens.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add [CLS]/[SEP] framing: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for the optional `B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
70
"""Check whether a non-negative integer reads the same forwards and backwards."""

# The public function starts with a double underscore, so it would be hidden
# from `from module import *`; declare it explicitly.
__all__ = ["__lowerCAmelCase"]


def __lowerCAmelCase(num: int) -> bool:
    """Return True if ``num`` is a palindromic integer.

    Negative numbers are never palindromes (the sign has no mirror image).

    >>> __lowerCAmelCase(121)
    True
    >>> __lowerCAmelCase(-121)
    False
    >>> __lowerCAmelCase(10)
    False
    >>> __lowerCAmelCase(0)
    True
    """
    if num < 0:
        return False

    num_copy: int = num
    rev_num: int = 0
    # Peel digits off `num` and push them onto `rev_num`.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
251
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class __A(PipelineTool):
    """Speech-to-text tool: transcribes audio to text with OpenAI Whisper."""

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn raw audio into Whisper input features (PyTorch tensors)."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Generate token ids from the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated ids into the transcript string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
103
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __A: def __init__( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Dict=7 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : List[Any]=3_7 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : Any=3 , __UpperCamelCase : int=4 , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Union[str, Any]=0 , ): lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = 
max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope lowerCamelCase_ = projection_dim def lowercase__ ( self : Union[str, Any] ): lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ): lowerCamelCase_ 
= TFDPRContextEncoder(config=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ): lowerCamelCase_ = TFDPRQuestionEncoder(config=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowercase__ ( self : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ): lowerCamelCase_ = TFDPRReader(config=__UpperCamelCase ) lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def lowercase__ ( self : Dict ): lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"""input_ids""": input_ids} return 
config, inputs_dict @require_tf class __A( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {} SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def lowercase__ ( self : Dict ): lowerCamelCase_ = TFDPRModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase__ ( self : Any ): lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__UpperCamelCase ) def lowercase__ ( self : Dict ): lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__UpperCamelCase ) def lowercase__ ( self : List[str] ): lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__UpperCamelCase ) @slow def lowercase__ ( self : Optional[int] ): for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRReader.from_pretrained(__UpperCamelCase ) 
self.assertIsNotNone(__UpperCamelCase ) @require_tf class __A( unittest.TestCase ): @slow def lowercase__ ( self : Union[str, Any] ): lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" ) lowerCamelCase_ = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase_ = model(__UpperCamelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase_ = tf.constant( [ [ 0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933, 0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
103
1
"""simple docstring""" from string import ascii_lowercase, ascii_uppercase def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not sentence: return "" _SCREAMING_SNAKE_CASE : int = dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
338
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) __snake_case = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Any: for attribute in key.split(""".""" ): lowercase_ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if weight_type is not None: lowercase_ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).shape else: lowercase_ = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase_ = value elif weight_type == "weight_g": lowercase_ = value elif weight_type == "weight_v": lowercase_ = value elif weight_type == "bias": lowercase_ = value else: lowercase_ = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]: lowercase_ = [] lowercase_ = fairseq_model.state_dict() lowercase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase_ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , hf_model.config.feat_extract_norm == """group""" , ) lowercase_ = True else: for key, mapped_key in MAPPING.items(): lowercase_ = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowercase_ = True if "*" in mapped_key: lowercase_ = name.split(SCREAMING_SNAKE_CASE_ )[0].split(""".""" )[-2] lowercase_ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE_ ) if "weight_g" in name: lowercase_ = """weight_g""" elif "weight_v" in name: lowercase_ = """weight_v""" elif "weight" in name: lowercase_ = """weight""" elif "bias" in name: lowercase_ = """bias""" else: lowercase_ = None set_recursively(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) 
->Union[str, Any]: lowercase_ = full_name.split("""conv_layers.""" )[-1] lowercase_ = name.split(""".""" ) lowercase_ = int(items[0] ) lowercase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowercase_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE_ ) def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[str]: lowercase_ = SEWConfig() if is_finetuned: lowercase_ = model.wav_encoder.wav_model.cfg else: lowercase_ = model.cfg lowercase_ = fs_config.conv_bias lowercase_ = eval(fs_config.conv_feature_layers ) lowercase_ = [x[0] for x in conv_layers] lowercase_ = [x[1] for x in conv_layers] lowercase_ = [x[2] for x in conv_layers] lowercase_ = """gelu""" lowercase_ = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" lowercase_ = 0.0 lowercase_ = fs_config.activation_fn.name lowercase_ = fs_config.encoder_embed_dim lowercase_ = 0.02 lowercase_ = fs_config.encoder_ffn_embed_dim lowercase_ = 1e-5 lowercase_ = fs_config.encoder_layerdrop lowercase_ = fs_config.encoder_attention_heads lowercase_ = fs_config.conv_pos_groups lowercase_ = fs_config.conv_pos lowercase_ = len(SCREAMING_SNAKE_CASE_ ) lowercase_ = fs_config.encoder_layers lowercase_ = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: lowercase_ = model.cfg lowercase_ = fs_config.final_dropout lowercase_ = fs_config.layerdrop lowercase_ = fs_config.activation_dropout lowercase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 lowercase_ = fs_config.attention_dropout lowercase_ = fs_config.dropout_input lowercase_ = fs_config.dropout lowercase_ = fs_config.mask_channel_length lowercase_ = fs_config.mask_channel_prob 
lowercase_ = fs_config.mask_length lowercase_ = fs_config.mask_prob lowercase_ = """Wav2Vec2FeatureExtractor""" lowercase_ = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True ) ->Optional[Any]: if is_finetuned: lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: lowercase_ = SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) else: lowercase_ = convert_config(model[0] , SCREAMING_SNAKE_CASE_ ) lowercase_ = model[0].eval() lowercase_ = True if config.feat_extract_norm == """layer""" else False lowercase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ) if is_finetuned: if dict_path: lowercase_ = Dictionary.load(SCREAMING_SNAKE_CASE_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase_ = target_dict.pad_index lowercase_ = target_dict.bos_index lowercase_ = target_dict.pad_index lowercase_ = target_dict.bos_index lowercase_ = target_dict.eos_index lowercase_ = len(target_dict.symbols ) lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , """vocab.json""" ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE_ ) ) return os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE_ ) lowercase_ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE_ , 
unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE_ , ) lowercase_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = SEWForCTC(SCREAMING_SNAKE_CASE_ ) else: lowercase_ = SEWModel(SCREAMING_SNAKE_CASE_ ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __snake_case = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
451
0
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first walk from ``u`` consuming each undirected edge once.

    Returns the traversal order as a list of nodes; ``visited_edge`` is a
    symmetric boolean matrix that is marked in both directions.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions: the graph is undirected.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by odd-degree vertex count.

    Returns ``(1, odd_node)`` for an Euler cycle (no odd vertices),
    ``(2, odd_node)`` for an Euler path (exactly two), and
    ``(3, odd_node)`` otherwise; ``odd_node`` is the last odd-degree node
    seen, or -1 when there is none.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Print whether ``graph`` has an Euler cycle/path and one traversal."""
    # BUG FIX (applies to the whole module): the original named every
    # function `__lowerCAmelCase` while the bodies called `dfs`,
    # `check_circuit_or_path`, `check_euler` and `main` — all undefined.
    # The names the bodies demand are restored.
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Exercise the checker on a handful of small example graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],  # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
531
def __lowerCAmelCase(column_title: str) -> int:
    """Convert an Excel column title (e.g. ``"AB"``) to its 1-based number.

    Treats the title as a base-26 numeral where ``A`` = 1 and ``Z`` = 26,
    so ``"A"`` -> 1, ``"Z"`` -> 26, ``"AA"`` -> 27, ``"AB"`` -> 28.
    """
    # BUG FIX: the original declared the parameter as `SCREAMING_SNAKE_CASE`
    # while the body read undefined names `column_title` and `power`; the
    # names the body demands are restored.
    # NOTE(review): `assert` for input validation is stripped under `-O`;
    # kept here to preserve the original AssertionError contract.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    # Accumulate digit values from the least-significant (rightmost) letter.
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
531
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __magic_name__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( a ): """simple docstring""" a_ : str =["pixel_values"] def __init__( self : Any , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : float = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : Union[int, float] = 1 / 255 , _snake_case : bool = True , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , **_snake_case : List[str] , ) -> None: '''simple docstring''' super().__init__(**_snake_case ) a__ = size if size is not None else {'shortest_edge': 384} a__ = get_size_dict(_snake_case , default_to_square=_snake_case ) a__ = do_resize a__ = size # Default value set here for backwards compatibility where the value in config is None a__ = crop_pct if crop_pct is not None else 224 / 256 a__ = resample a__ = do_rescale a__ = rescale_factor a__ = do_normalize a__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self : Optional[Any] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : float , _snake_case : PILImageResampling = PILImageResampling.BICUBIC , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : 
Optional[int] , ) -> np.ndarray: '''simple docstring''' a__ = get_size_dict(_snake_case , default_to_square=_snake_case ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) a__ = size['shortest_edge'] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct a__ = int(shortest_edge / crop_pct ) a__ = get_resize_output_image_size(_snake_case , size=_snake_case , default_to_square=_snake_case ) a__ = resize(image=_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_snake_case , size=(shortest_edge, shortest_edge) , data_format=_snake_case , **_snake_case ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _snake_case , size=(shortest_edge, shortest_edge) , resample=_snake_case , data_format=_snake_case , **_snake_case ) def _lowerCAmelCase ( self : Optional[int] , _snake_case : np.ndarray , _snake_case : Union[int, float] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : List[Any] , ) -> Optional[Any]: '''simple docstring''' return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def _lowerCAmelCase ( self : List[str] , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : int , ) -> np.ndarray: '''simple docstring''' return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case ) def _lowerCAmelCase ( self : Dict , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : float = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : float = None , _snake_case : bool = None , _snake_case 
: Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : Union[str, Any] , ) -> PIL.Image.Image: '''simple docstring''' a__ = do_resize if do_resize is not None else self.do_resize a__ = crop_pct if crop_pct is not None else self.crop_pct a__ = resample if resample is not None else self.resample a__ = do_rescale if do_rescale is not None else self.do_rescale a__ = rescale_factor if rescale_factor is not None else self.rescale_factor a__ = do_normalize if do_normalize is not None else self.do_normalize a__ = image_mean if image_mean is not None else self.image_mean a__ = image_std if image_std is not None else self.image_std a__ = size if size is not None else self.size a__ = get_size_dict(_snake_case , default_to_square=_snake_case ) a__ = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
a__ = [to_numpy_array(_snake_case ) for image in images] if do_resize: a__ = [self.resize(image=_snake_case , size=_snake_case , crop_pct=_snake_case , resample=_snake_case ) for image in images] if do_rescale: a__ = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_normalize: a__ = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images] a__ = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] a__ = {'pixel_values': images} return BatchFeature(data=_snake_case , tensor_type=_snake_case )
232
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class SCREAMING_SNAKE_CASE ( a ): """simple docstring""" a_ : Optional[int] ="beit" def __init__( self : Any , _snake_case : str=8192 , _snake_case : Union[str, Any]=768 , _snake_case : Tuple=12 , _snake_case : Optional[Any]=12 , _snake_case : str=3072 , _snake_case : str="gelu" , _snake_case : Any=0.0 , _snake_case : Optional[Any]=0.0 , _snake_case : Optional[int]=0.02 , _snake_case : Optional[int]=1E-12 , _snake_case : List[str]=224 , _snake_case : int=16 , _snake_case : List[str]=3 , _snake_case : Optional[Any]=False , _snake_case : List[str]=False , _snake_case : str=False , _snake_case : Tuple=False , _snake_case : Dict=0.1 , _snake_case : int=0.1 , _snake_case : List[Any]=True , _snake_case : List[Any]=[3, 5, 7, 11] , _snake_case : str=[1, 2, 3, 6] , _snake_case : Optional[int]=True , _snake_case : Tuple=0.4 , _snake_case : Tuple=256 , _snake_case : Dict=1 , _snake_case : str=False , _snake_case : str=255 , **_snake_case : Optional[Any] , ) -> int: '''simple docstring''' super().__init__(**_snake_case ) a__ = vocab_size a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = intermediate_size a__ = hidden_act a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = initializer_range a__ = layer_norm_eps a__ = image_size a__ = patch_size a__ = num_channels a__ = use_mask_token a__ = use_absolute_position_embeddings a__ = use_relative_position_bias a__ = use_shared_relative_position_bias a__ = layer_scale_init_value a__ = drop_path_rate a__ = 
use_mean_pooling # decode head attributes (semantic segmentation) a__ = out_indices a__ = pool_scales # auxiliary head attributes (semantic segmentation) a__ = use_auxiliary_head a__ = auxiliary_loss_weight a__ = auxiliary_channels a__ = auxiliary_num_convs a__ = auxiliary_concat_input a__ = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE ( a ): """simple docstring""" a_ : Any =version.parse("1.11" ) @property def _lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _lowerCAmelCase ( self : str ) -> float: '''simple docstring''' return 1E-4
232
1
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class __lowerCAmelCase ( __magic_name__ ):
    """Offline-mode integration tests: each test launches a child Python process
    with ``socket.socket`` monkey-patched so any network access raises, and
    asserts that cached models still load (or fail with a clear error).

    NOTE(review): this dump collapsed every local variable to ``__lowerCamelCase``
    and call sites reference names that no longer exist (``load``, ``run``,
    ``mock``, ``lowerCamelCase__``, ``result``); the base class name
    ``__magic_name__`` is also undefined (presumably ``TestCasePlus``). Code is
    preserved byte-for-byte; only documentation and invalid return annotations
    were changed.
    """

    @require_torch
    def lowercase_ ( self ) -> None:
        """Hard offline (RuntimeError on socket): cached hub files must still load."""
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        __lowerCamelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        __lowerCamelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        __lowerCamelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        __lowerCamelCase = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(lowerCamelCase__ )
        BertModel.from_pretrained(lowerCamelCase__ )
        BertTokenizer.from_pretrained(lowerCamelCase__ )
        pipeline(task='fill-mask' , model=lowerCamelCase__ )

        # baseline - just load from_pretrained with normal network
        __lowerCamelCase = [sys.executable, '-c', '\n'.join([load, run, mock] )]

        # should succeed
        __lowerCamelCase = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        __lowerCamelCase = '1'
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

    @require_torch
    def lowercase_ ( self ) -> None:
        """Flaky-network simulation (socket.error): cached load must still succeed."""
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        __lowerCamelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        __lowerCamelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        __lowerCamelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        __lowerCamelCase = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(lowerCamelCase__ )
        BertModel.from_pretrained(lowerCamelCase__ )
        BertTokenizer.from_pretrained(lowerCamelCase__ )
        pipeline(task='fill-mask' , model=lowerCamelCase__ )

        # baseline - just load from_pretrained with normal network
        __lowerCamelCase = [sys.executable, '-c', '\n'.join([load, run, mock] )]

        # should succeed
        __lowerCamelCase = self.get_env()
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

    @require_torch
    def lowercase_ ( self ) -> None:
        """Sharded checkpoint: TRANSFORMERS_OFFLINE=1 plus cache must be enough."""
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        __lowerCamelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        __lowerCamelCase = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        __lowerCamelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        __lowerCamelCase = [sys.executable, '-c', '\n'.join([load, run] )]

        # should succeed
        __lowerCamelCase = self.get_env()
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

        # next emulate no network
        __lowerCamelCase = [sys.executable, '-c', '\n'.join([load, mock, run] )]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        __lowerCamelCase = '1'
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

    @require_torch
    def lowercase_ ( self ) -> None:
        """Offline + bare `pipeline(model=...)` must fail with an explanatory error."""
        __lowerCamelCase = '\nfrom transformers import pipeline\n '
        __lowerCamelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        __lowerCamelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        __lowerCamelCase = self.get_env()
        __lowerCamelCase = '1'
        __lowerCamelCase = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        # Expect failure (returncode 1) with the task-inference error message.
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )

    @require_torch
    def lowercase_ ( self ) -> None:
        """trust_remote_code model: load online once, then again offline from cache."""
        __lowerCamelCase = '\nfrom transformers import AutoModel\n '
        __lowerCamelCase = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        __lowerCamelCase = [sys.executable, '-c', '\n'.join([load, run] )]

        # should succeed
        __lowerCamelCase = self.get_env()
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        __lowerCamelCase = '1'
        __lowerCamelCase = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
167
__A = "Input must be a string of 8 numbers plus letter" __A = "TRWAGMYFPDXBNJZSQVHLCKE" def lowerCamelCase_ ( UpperCamelCase__ : str ) -> bool: """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): __lowerCamelCase = F"""Expected string as input, found {type(UpperCamelCase__ ).__name__}""" raise TypeError(UpperCamelCase__ ) __lowerCamelCase = spanish_id.replace('-' , '' ).upper() if len(UpperCamelCase__ ) != 9: raise ValueError(UpperCamelCase__ ) try: __lowerCamelCase = int(spanish_id_clean[0:8] ) __lowerCamelCase = spanish_id_clean[8] except ValueError as ex: raise ValueError(UpperCamelCase__ ) from ex if letter.isdigit(): raise ValueError(UpperCamelCase__ ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
167
1
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class _a ( unittest.TestCase ):
    """Smoke tests for the ``accelerate launch`` / ``accelerate test`` CLIs.

    NOTE(review): the dump collapsed every class attribute to ``A_`` (each
    assignment shadows the previous) and the bodies reference names that no
    longer exist (``mod_file``, ``config_folder``, ``config_file``, ``cmd``,
    ``self.base_cmd`` etc.). Code preserved byte-for-byte; only documentation
    and invalid return annotations were changed.
    """

    A_ = inspect.getfile(accelerate.test_utils )
    A_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
    A_ = ["""accelerate""", """launch"""]
    A_ = Path.home() / """.cache/huggingface/accelerate"""
    A_ = """default_config.yaml"""
    A_ = config_folder / config_file
    A_ = config_folder / """_default_config.yaml"""
    A_ = Path("""tests/test_configs""" )

    @classmethod
    def _UpperCAmelCase ( cls ) -> None:
        # Stash any pre-existing user config so the tests run with defaults.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def _UpperCAmelCase ( cls ) -> None:
        # Restore the stashed user config after the test class finishes.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def _UpperCAmelCase ( self ) -> None:
        # Launch with no explicit config; add --multi_gpu when >1 CUDA device.
        UpperCamelCase_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def _UpperCAmelCase ( self ) -> None:
        # Launch once per YAML config under tests/test_configs.
        for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
            with self.subTest(config_file=_UpperCAmelCase ):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )

    def _UpperCAmelCase ( self ) -> None:
        # `accelerate test` sanity run.
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )


class _a ( unittest.TestCase ):
    """`accelerate tpu-config` command-construction tests (all use --debug so the
    gcloud command is printed instead of executed).

    NOTE(review): shadows the class above (same collapsed name); class attrs
    are likewise all named ``A_`` and the assertions reference undefined
    ``_UpperCAmelCase``. Preserved byte-for-byte; docs only.
    """

    A_ = """test-tpu"""
    A_ = """us-central1-a"""
    A_ = """ls"""
    A_ = ["""accelerate""", """tpu-config"""]
    A_ = """cd /usr/share"""
    A_ = """tests/test_samples/test_command_file.sh"""
    A_ = """Running gcloud compute tpus tpu-vm ssh"""

    def _UpperCAmelCase ( self ) -> None:
        # Single --command via CLI flags only.
        UpperCamelCase_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # --command combined with a legacy (0.12.0) config file.
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # Commands taken entirely from the latest config file.
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # CLI --command overrides the config file's commands.
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # Multiple --command flags are chained with semicolons.
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # Commands read from a shell script via --command_file.
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # --command_file combined with a legacy (0.12.0) config file.
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # --install_accelerate prepends a pip upgrade to the command chain.
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )

    def _UpperCAmelCase ( self ) -> None:
        # --accelerate_version pins the pip install instead of upgrading.
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
23
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _a ( UpperCAmelCase__ ):
    """Repository hygiene checks over every script under ``./datasets``:
    (1) each ``open(...)`` must pass an encoding (or a binary mode), and
    (2) no bare ``print(...)`` calls are allowed.

    NOTE(review): the base-class name ``UpperCAmelCase__`` is undefined in this
    dump (presumably ``TestCase``), and both helpers below share the collapsed
    name ``_UpperCAmelCase`` while the call sites reference
    ``_no_encoding_on_file_open`` / ``_no_print_statements``. Code preserved
    byte-for-byte; only documentation and invalid return annotations changed.
    """

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> "re.Match | None":
        # Match an `open(` whose argument list mentions neither an encoding
        # keyword nor a binary/write mode token.
        with open(_UpperCAmelCase , encoding='utf-8' ) as input_file:
            UpperCamelCase_ = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            UpperCamelCase_ = input_file.read()
            UpperCamelCase_ = regexp.search(_UpperCAmelCase )
        return match

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> "re.Match | None":
        # Group 1 only matches a *real* print(; the other alternatives consume
        # prints inside comments, strings and docstrings so they are ignored.
        with open(_UpperCAmelCase , encoding='utf-8' ) as input_file:
            UpperCamelCase_ = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
            UpperCamelCase_ = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            UpperCamelCase_ = regexp.finditer(_UpperCAmelCase )
        UpperCamelCase_ = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def _UpperCAmelCase ( self ) -> None:
        # Fail if any dataset script opens a file without an explicit encoding.
        UpperCamelCase_ = Path('./datasets' )
        UpperCamelCase_ = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(_UpperCAmelCase ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )

    def _UpperCAmelCase ( self ) -> None:
        # Fail if any dataset script contains a bare print statement.
        UpperCamelCase_ = Path('./datasets' )
        UpperCamelCase_ = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(_UpperCAmelCase ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
23
1
from __future__ import annotations


def a(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the combined elements of two (unsorted) arrays.

    Bug fix: the dump declared both parameters with the same name (a
    SyntaxError); two distinct parameters are restored.

    Args:
        numsa: first array of numbers.
        numsb: second array of numbers.

    Returns:
        The median of the merged, sorted values (mean of the two middle
        values when the total count is even).
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    # Even count: average the two middle elements.
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {a(array_a, array_b)}")
89
from __future__ import annotations

# Undirected example graph used by the demo below (adjacency lists).
__SCREAMING_SNAKE_CASE = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    """Breadth-first-search tree over *graph* rooted at *source_vertex*.

    Reconstruction notes: the dump collapsed ``visited``/``queue``/``vertex``
    and the parent-dict writes into one repeatedly-reassigned local (leaving
    undefined-name errors), gave both methods the same name, and the
    ``__main__`` guard referenced an undefined ``Graph``/``graph``; the working
    BFS is restored to match those call sites.
    """

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # Maps each reached vertex to its parent in the resulting BFS tree.
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Populate ``self.parent`` by BFS from the source vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first-in first-out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the BFS path "source->...->target".

        Raises:
            ValueError: if *target_vertex* was not reached by the search.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        # Walk up the parent chain recursively, appending this vertex.
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(__SCREAMING_SNAKE_CASE, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # demo: raises ValueError (unreachable)
89
1
from __future__ import annotations import math lowerCamelCase_ = '''2020.9.26''' lowerCamelCase_ = '''xcodz-dot, cclaus, dhruvmanila''' def __magic_name__ ( __a : int , __a : List[Any] , __a : List[Any] , __a : Optional[int] , __a : Tuple ): '''simple docstring''' if not all(isinstance(_A , (float, int) ) for val in locals().values() ): UpperCamelCase__ = f"Input values must either be float or int: {list(locals().values() )}" raise TypeError(_A ) UpperCamelCase__ = ((x * distance) / (z + distance)) * scale UpperCamelCase__ = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def __magic_name__ ( __a : List[str] , __a : Union[str, Any] , __a : Dict , __a : Optional[Any] , __a : Any ): '''simple docstring''' if not isinstance(_A , _A ): raise TypeError("""Axis must be a str""" ) UpperCamelCase__ = locals() del input_variables["axis"] if not all(isinstance(_A , (float, int) ) for val in input_variables.values() ): UpperCamelCase__ = ( """Input values except axis must either be float or int: """ f"{list(input_variables.values() )}" ) raise TypeError(_A ) UpperCamelCase__ = (angle % 360) / 450 * 180 / math.pi if axis == "z": UpperCamelCase__ = x * math.cos(_A ) - y * math.sin(_A ) UpperCamelCase__ = y * math.cos(_A ) + x * math.sin(_A ) UpperCamelCase__ = z elif axis == "x": UpperCamelCase__ = y * math.cos(_A ) - z * math.sin(_A ) UpperCamelCase__ = z * math.cos(_A ) + y * math.sin(_A ) UpperCamelCase__ = x elif axis == "y": UpperCamelCase__ = x * math.cos(_A ) - z * math.sin(_A ) UpperCamelCase__ = z * math.cos(_A ) + x * math.sin(_A ) UpperCamelCase__ = y else: raise ValueError("""not a valid axis, choose one of \'x\', \'y\', \'z\'""" ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(f'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }') print(f'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
513
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True iff *phone* is a valid Sri Lankan mobile number.

    Accepted forms: prefix 0 / 94 / +94 / 0094, then '7' plus a valid operator
    digit (0,1,2,4,5,6,7,8), an optional '-' or ' ' separator, and 7 digits.

    Bug fixes: the dump never used its compiled pattern and called
    ``re.search(phone, phone)`` (any plain input matched itself, so everything
    validated as True); the ``__main__`` guard also called the undefined name
    ``is_sri_lankan_phone_number``, which is restored here.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(pattern.search(phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
431
0
"""Greatest common divisor: recursive and iterative Euclid's algorithm.

Reconstruction notes: the dump gave all three functions one shared name with
duplicate parameter names (a SyntaxError), and both the recursive call and the
``__main__`` guard referenced undefined names; the names they reference are
restored.
"""


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via recursive Euclid; result is non-negative."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) via iterative Euclid; result is non-negative."""
    while y:  # when y reaches 0, x holds the GCD
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers and print both GCD computations."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
644
"""All permutations of a list: rotation/recursion and in-place backtracking.

Reconstruction notes: the dump gave both functions one shared name, the
recursion called the undefined ``permute``, and the ``__main__`` guard called
an undefined helper; the intended names are restored.
"""
from __future__ import annotations


def permute(nums: list[int]) -> list[list[int]]:
    """Return every permutation of *nums* (list left unchanged overall).

    >>> sorted(permute([1, 2]))
    [[1, 2], [2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)  # fix each element in turn...
        permutations = permute(nums)  # ...permute the remainder...
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)  # ...and restore the list for the next rotation
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return every permutation of *nums* via in-place backtracking swaps."""

    def backtrack(start: int) -> None:
        # Prefix complete: snapshot the current arrangement.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # undo the swap

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
644
1
"""Logging + tqdm progress-bar control utilities (``datasets``-style module).

NOTE(review): this dump collapsed nearly every identifier — all public
functions share the name ``A_`` (each def shadows the previous), locals are
assigned to one name and read back under another (``env_level_str``,
``library_root_logger``, ``name``, ``disable``, ``args`` are undefined), both
classes share the name ``A__``, several defs reuse one parameter name
(``*A_, **A_`` — a SyntaxError), and the module-level call references an
undefined ``_configure_library_root_logger``. Code is preserved byte-for-byte;
only documentation and annotations that referenced un-imported typing names
(which would themselves raise NameError) were changed.
"""
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib


# Env-var value -> logging level mapping for DATASETS_VERBOSITY.
__lowerCamelCase : dict = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

# Fallback level when the env var is unset/unknown.
__lowerCamelCase : int = logging.WARNING


def A_ ( ) -> int:
    """Resolve the default log level, honoring the DATASETS_VERBOSITY env var."""
    UpperCamelCase : Tuple = os.getenv("DATASETS_VERBOSITY" , lowerCAmelCase__ )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
    return _default_log_level


def A_ ( ) -> str:
    """Return the top-level library/package name."""
    return __name__.split("." )[0]


def A_ ( ) -> logging.Logger:
    """Return the library's root logger."""
    return logging.getLogger(_get_library_name() )


def A_ ( ) -> None:
    """Set the library root logger to the resolved default level."""
    UpperCamelCase : Union[str, Any] = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )


def A_ ( ) -> None:
    """Reset the library root logger level to NOTSET."""
    UpperCamelCase : Union[str, Any] = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )


def A_ ( _lowerCAmelCase = None ) -> logging.Logger:
    """Return a logger with the given name (library root name when omitted)."""
    if name is None:
        UpperCamelCase : Optional[Any] = _get_library_name()
    return logging.getLogger(lowerCAmelCase__ )


def A_ ( ) -> int:
    """Return the effective verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def A_ ( _lowerCAmelCase ) -> None:
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(lowerCAmelCase__ )


def A_ ( ) -> None:
    """Convenience wrapper: set verbosity (collapsed INFO variant)."""
    return set_verbosity(lowerCAmelCase__ )


def A_ ( ) -> None:
    """Convenience wrapper: set verbosity (collapsed WARNING variant)."""
    return set_verbosity(lowerCAmelCase__ )


def A_ ( ) -> None:
    """Convenience wrapper: set verbosity (collapsed DEBUG variant)."""
    return set_verbosity(lowerCAmelCase__ )


def A_ ( ) -> None:
    """Convenience wrapper: set verbosity (collapsed ERROR variant)."""
    return set_verbosity(lowerCAmelCase__ )


def A_ ( ) -> None:
    """Disable propagation to ancestor loggers (collapsed to an assignment)."""
    UpperCamelCase : Optional[int] = False


def A_ ( ) -> None:
    """Enable propagation to ancestor loggers (collapsed to an assignment)."""
    UpperCamelCase : Optional[Any] = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class A__ :
    """No-op stand-in for tqdm used when progress bars are disabled."""

    def __init__( self , *A_ , **A_ ):  # pylint: disable=unused-argument
        # Keep the wrapped iterable (first positional arg) so __iter__ works.
        UpperCamelCase : int = args[0] if args else None

    def __iter__( self ):
        return iter(self._iterator )

    def __getattr__( self , A_ ):
        # Any tqdm method becomes a no-op.
        def empty_fn(*A_ , **A_ ):  # pylint: disable=unused-argument
            return
        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self , A_ , A_ , A_ ):
        return


# Global toggle read by the dispatcher class below.
__lowerCamelCase : bool = True


class A__ :
    """Dispatcher: yields a real tqdm when enabled, the no-op wrapper otherwise.

    NOTE(review): shadows the class above (same collapsed name) and references
    undefined ``disable``/``__UpperCamelCase``; preserved as-is.
    """

    def __call__( self , *A_ , A_=False , **A_ ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*__UpperCamelCase , **__UpperCamelCase )
        else:
            return EmptyTqdm(*__UpperCamelCase , **__UpperCamelCase )

    def __UpperCamelCase( self , *A_ , **A_ ):
        """Forward set_lock to tqdm when progress bars are active."""
        UpperCamelCase : Any = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*__UpperCamelCase , **__UpperCamelCase )

    def __UpperCamelCase( self ):
        """Forward get_lock to tqdm when progress bars are active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


# Module-level singleton used as the tqdm factory.
__lowerCamelCase : int = _tqdm_cls()


def A_ ( ) -> bool:
    """Return whether progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active )


def A_ ( ) -> None:
    """Enable progress bars (collapsed to a plain assignment in the dump)."""
    global _tqdm_active
    UpperCamelCase : Dict = True


def A_ ( ) -> None:
    """Disable progress bars (collapsed to a plain assignment in the dump)."""
    global _tqdm_active
    UpperCamelCase : int = False
629
from collections.abc import Sequence def lowerCamelCase_ ( lowerCAmelCase__ : Sequence[int] | None = None ) -> int: '''simple docstring''' if nums is None or not nums: raise ValueError('Input sequence should not be empty' ) A = nums[0] for i in range(1 , len(lowerCAmelCase__ ) ): A = nums[i] A = max(lowerCAmelCase__ , ans + num , lowerCAmelCase__ ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __snake_case :str =int(input('Enter number of elements : ').strip()) __snake_case :Optional[int] =list(map(int, input('\nEnter the numbers : ').strip().split()))[:n] print(max_subsequence_sum(array))
106
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """
    Configuration class for the Audio Spectrogram Transformer (AST) model.

    Stores the hyperparameters assigned below; defaults follow the
    MIT/ast-finetuned-audioset-10-10-0.4593 architecture. Extra keyword
    arguments are forwarded to `PretrainedConfig`.

    Args:
        hidden_size: Dimensionality of the encoder layers.
        num_hidden_layers: Number of hidden layers in the encoder.
        num_attention_heads: Number of attention heads per layer.
        intermediate_size: Dimensionality of the feed-forward layer.
        hidden_act: Activation function name (e.g. "gelu").
        hidden_dropout_prob: Dropout probability for fully connected layers.
        attention_probs_dropout_prob: Dropout probability for attention weights.
        initializer_range: Std-dev of the weight-init truncated normal.
        layer_norm_eps: Epsilon used by layer-norm layers.
        patch_size: Size of the spectrogram patches.
        qkv_bias: Whether to add a bias to the query/key/value projections.
        frequency_stride: Patch stride along the frequency axis.
        time_stride: Patch stride along the time axis.
        max_length: Maximum length of the (time) input.
        num_mel_bins: Number of mel-frequency bins of the input spectrogram.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
718
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#

import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean

import pandas as pd
import torch
from tqdm import tqdm

import transformers


nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes so the log file stays readable
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely,
    wrapped to *max_width* chars per line.

    Args:
        max_width: The width to wrap the command to.
        full_python_path: Whether to use the full path of the python
            executable or just its last segment (i.e. ``python``).
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    """Normalize args.base_cmd and return it as an argv list, forcing our
    own --output_dir and --overwrite_output_dir."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the wanted metrics from
    output_dir/all_results.json ({target_metric_key: nan} on failure)."""
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation *repeat_times* times and return the averaged metrics
    (nan target metric if every repeat failed)."""
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    """Return a report of software versions and GPU hardware (requires CUDA)."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the comparison tables (github-markdown and console)."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        # (NOTE: `!= nan` is vacuously true; pandas .min() skips NaNs anyway)
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
131
0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq state-dict key (or prefix) -> HF Hubert parameter path.
# A "*" is replaced by the encoder layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk *key* ("a.b.c") into *hf_pointer* and copy *value* into the
    attribute selected by *weight_type* (weight/weight_g/weight_v/bias/None)."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every fairseq weight into the HF model, logging the ones that
    match no MAPPING entry."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # Fine-tuned (CTC) models wrap the base model under .hubert
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # e.g. "encoder.layers.3.self_attn.k_proj.weight" -> "3"
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm weight into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Convert a fairseq Hubert checkpoint to the HF transformers format and save
    it (plus tokenizer/feature-extractor for fine-tuned models) under
    *pytorch_dump_folder_path*.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # layer-norm feature extractors were trained with attention masks
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
85
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the sample image in BGR and grayscale.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals kernel
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
85
1
"""Haversine great-circle distance with an ellipsoidal latitude correction."""
from math import asin, atan, cos, radians, sin, sqrt, tan

# Constants per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in meters between two points on Earth.

    Uses the haversine formula on a sphere of radius RADIUS, after converting
    the geodetic latitudes to reduced latitudes via the WGS-84 flattening
    (atan((1 - f) * tan(lat))).

    Args:
        lat1, lon1: Latitude and longitude of the first point, in degrees.
        lat2, lon2: Latitude and longitude of the second point, in degrees.

    Returns:
        Distance between the two points in meters.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation: hav(theta) = sin^2(dphi/2) + cos(phi1)cos(phi2) sin^2(dlambda/2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)

    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))

    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
708
"""Utilities for the RAG example scripts: dataset, metrics, and small I/O helpers."""

import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padding/truncating to ``max_length``.

    BART tokenizers need ``add_prefix_space`` when the line does not already
    start with a space.
    """
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by ``pad_token_id``."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    """Line-by-line seq2seq dataset reading ``<type_path>.source`` / ``<type_path>.target`` files."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; a RagTokenizer bundles two
        # tokenizers (question encoder for inputs, generator for targets).
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        # Character length of each line; used both for __len__ and for the
        # empty-line sanity check in __init__.
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack examples into a batch and trim pure-padding columns."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    """Flatten one level of nesting."""
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to ``folder_path/git_log.json``."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    """Collect repo id/sha/branch and hostname for experiment logging."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """``list(map(f, x))``."""
    return list(map(f, x))


def pickle_save(obj, path):
    """``pickle.dump(obj, path)``."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style)."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and a reference, after normalization."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Mean exact-match over paired prediction/reference lines."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    """Move hparams listed in ``extra_params`` onto ``config``, dropping unknown ones.

    T5 models don't have a `dropout` param, they have `dropout_rate` instead,
    so an equivalence table is consulted before giving up on a parameter.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
324
0
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class lowerCamelCase (tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : List[Any] , __UpperCAmelCase : float , __UpperCAmelCase : Callable , __UpperCAmelCase : int , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : str = None , ) -> List[str]: super().__init__() SCREAMING_SNAKE_CASE__ = initial_learning_rate SCREAMING_SNAKE_CASE__ = warmup_steps SCREAMING_SNAKE_CASE__ = power SCREAMING_SNAKE_CASE__ = decay_schedule_fn SCREAMING_SNAKE_CASE__ = name def __call__( self : List[Any] , __UpperCAmelCase : List[str] ) -> List[str]: with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. SCREAMING_SNAKE_CASE__ = tf.cast(__UpperCAmelCase , tf.floataa ) SCREAMING_SNAKE_CASE__ = tf.cast(self.warmup_steps , tf.floataa ) SCREAMING_SNAKE_CASE__ = global_step_float / warmup_steps_float SCREAMING_SNAKE_CASE__ = self.initial_learning_rate * tf.math.pow(__UpperCAmelCase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__UpperCAmelCase , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = 0.0 , snake_case__ = 0.9 , snake_case__ = 0.9_99 , snake_case__ = 1e-8 , snake_case__ = None , snake_case__ = None , snake_case__ = 0.0 , snake_case__ = 1.0 , snake_case__ = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = 
tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=snake_case__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=snake_case__ , ) if num_warmup_steps: SCREAMING_SNAKE_CASE__ = WarmUp( initial_learning_rate=snake_case__ , decay_schedule_fn=snake_case__ , warmup_steps=snake_case__ , ) if weight_decay_rate > 0.0: SCREAMING_SNAKE_CASE__ = AdamWeightDecay( learning_rate=snake_case__ , weight_decay_rate=snake_case__ , beta_a=snake_case__ , beta_a=snake_case__ , epsilon=snake_case__ , clipnorm=snake_case__ , global_clipnorm=snake_case__ , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=snake_case__ , ) else: SCREAMING_SNAKE_CASE__ = tf.keras.optimizers.Adam( learning_rate=snake_case__ , beta_a=snake_case__ , beta_a=snake_case__ , epsilon=snake_case__ , clipnorm=snake_case__ , global_clipnorm=snake_case__ , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class lowerCamelCase (A__ ): def __init__( self : Tuple , __UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , __UpperCAmelCase : float = 0.9 , __UpperCAmelCase : float = 0.999 , __UpperCAmelCase : float = 1e-7 , __UpperCAmelCase : bool = False , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "AdamWeightDecay" , **__UpperCAmelCase : Optional[int] , ) -> List[str]: super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = weight_decay_rate SCREAMING_SNAKE_CASE__ = include_in_weight_decay SCREAMING_SNAKE_CASE__ = exclude_from_weight_decay @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , __UpperCAmelCase : Dict ) -> Dict: SCREAMING_SNAKE_CASE__ = {"""WarmUp""": WarmUp} return super(__UpperCAmelCase , cls ).from_config(__UpperCAmelCase , custom_objects=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : int ) -> Union[str, Any]: super(__UpperCAmelCase , self )._prepare_local(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : List[Any] ) -> List[Any]: 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = list(zip(*__UpperCAmelCase ) ) return super(__UpperCAmelCase , self ).apply_gradients(zip(__UpperCAmelCase , __UpperCAmelCase ) , name=__UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str ) -> Tuple: if apply_state is None: return self._decayed_lr_t[var_dtype], {} SCREAMING_SNAKE_CASE__ = apply_state or {} SCREAMING_SNAKE_CASE__ = apply_state.get((var_device, var_dtype) ) if coefficients is None: SCREAMING_SNAKE_CASE__ = self._fallback_apply_state(__UpperCAmelCase , __UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str=None ) -> Tuple: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_lr(var.device , var.dtype.base_dtype , __UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = self._decay_weights_op(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) with tf.control_dependencies([decay] ): return super(__UpperCAmelCase , self )._resource_apply_dense(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any]=None ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._get_lr(var.device , var.dtype.base_dtype , __UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = self._decay_weights_op(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) with tf.control_dependencies([decay] ): return super(__UpperCAmelCase , self )._resource_apply_sparse(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = super().get_config() config.update({"""weight_decay_rate""": 
self.weight_decay_rate} ) return config def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] ) -> int: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(__UpperCAmelCase , __UpperCAmelCase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(__UpperCAmelCase , __UpperCAmelCase ) is not None: return False return True class lowerCamelCase (A__ ): def __init__( self : Any ) -> Tuple: SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = None @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: if self._accum_steps is None: SCREAMING_SNAKE_CASE__ = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=__UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Any , __UpperCAmelCase : str ) -> Optional[int]: if not self._gradients: SCREAMING_SNAKE_CASE__ = self.step # Create the step variable. 
self._gradients.extend( [ tf.Variable( tf.zeros_like(__UpperCAmelCase ) , trainable=__UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(__UpperCAmelCase ) != len(self._gradients ): raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__UpperCAmelCase )}""" ) for accum_gradient, gradient in zip(self._gradients , __UpperCAmelCase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(__UpperCAmelCase ) self._accum_steps.assign_add(1 ) def SCREAMING_SNAKE_CASE ( self : str ) -> Any: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(__UpperCAmelCase ) )
196
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : List[Any] = logging.get_logger(__name__) A_ : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class lowerCamelCase (A__ ): lowerCamelCase__ : Optional[int] = 'dpr' def __init__( self : Optional[int] , __UpperCAmelCase : List[str]=3_0_5_2_2 , __UpperCAmelCase : int=7_6_8 , __UpperCAmelCase : Optional[Any]=1_2 , __UpperCAmelCase : Dict=1_2 , __UpperCAmelCase : Any=3_0_7_2 , __UpperCAmelCase : int="gelu" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Any=5_1_2 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Dict=1e-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Union[str, Any]="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Dict , ) -> Tuple: super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = 
hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = layer_norm_eps SCREAMING_SNAKE_CASE__ = projection_dim SCREAMING_SNAKE_CASE__ = position_embedding_type
196
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class UpperCamelCase ( lowercase_ ): lowercase = 'dpr' def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0 ,__UpperCamelCase="absolute" ,__UpperCamelCase = 0 ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = vocab_size lowercase_ : Dict = hidden_size lowercase_ : List[Any] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : List[str] = hidden_act lowercase_ : str = intermediate_size lowercase_ : Union[str, Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : str = max_position_embeddings lowercase_ : Any = type_vocab_size lowercase_ : List[str] = 
initializer_range lowercase_ : Any = layer_norm_eps lowercase_ : Optional[Any] = projection_dim lowercase_ : Dict = position_embedding_type
477
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __SCREAMING_SNAKE_CASE =random.Random() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1.0 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=None ): if rng is None: lowercase_ : Union[str, Any] = global_rng lowercase_ : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=400 ,__UpperCamelCase=2000 ,__UpperCamelCase=10 ,__UpperCamelCase=160 ,__UpperCamelCase=8 ,__UpperCamelCase=0.0 ,__UpperCamelCase=4000 ,__UpperCamelCase=False ,__UpperCamelCase=True ,) -> List[str]: '''simple docstring''' lowercase_ : Tuple = parent lowercase_ : Optional[Any] = batch_size lowercase_ : Optional[int] = min_seq_length lowercase_ : List[Any] = max_seq_length lowercase_ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase_ : Tuple = padding_value lowercase_ : Dict = sampling_rate lowercase_ : List[str] = return_attention_mask lowercase_ : str = do_normalize lowercase_ : str = feature_size lowercase_ : List[Any] = chunk_length lowercase_ : Optional[int] = hop_length def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return { "feature_size": self.feature_size, 
"hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _UpperCAmelCase ( self ,__UpperCamelCase=False ,__UpperCamelCase=False ) -> Union[str, Any]: '''simple docstring''' def _flatten(__UpperCamelCase ): return list(itertools.chain(*__UpperCamelCase ) ) if equal_length: lowercase_ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowercase_ : int = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: lowercase_ : int = [np.asarray(__UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = WhisperFeatureExtractor if is_speech_available() else None def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : int = WhisperFeatureExtractionTester(self ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : List[Any] = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) lowercase_ : Optional[Any] = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = feat_extract_first.to_dict() lowercase_ : int = feat_extract_second.to_dict() lowercase_ : List[Any] = feat_extract_first.mel_filters lowercase_ : Tuple = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase ,__UpperCamelCase ) ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Any = 
self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : List[Any] = os.path.join(__UpperCamelCase ,'feat_extract.json' ) feat_extract_first.to_json_file(__UpperCamelCase ) lowercase_ : Dict = self.feature_extraction_class.from_json_file(__UpperCamelCase ) lowercase_ : Optional[Any] = feat_extract_first.to_dict() lowercase_ : Optional[Any] = feat_extract_second.to_dict() lowercase_ : List[str] = feat_extract_first.mel_filters lowercase_ : Tuple = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase ,__UpperCamelCase ) ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] lowercase_ : str = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] # Test feature size lowercase_ : Tuple = feature_extractor(__UpperCamelCase ,padding='max_length' ,return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input lowercase_ : List[str] = feature_extractor(speech_inputs[0] ,return_tensors='np' ).input_features lowercase_ : List[str] = feature_extractor(np_speech_inputs[0] ,return_tensors='np' ).input_features self.assertTrue(np.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) # Test batched lowercase_ : Dict = feature_extractor(__UpperCamelCase ,return_tensors='np' ).input_features lowercase_ : List[str] = feature_extractor(__UpperCamelCase ,return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase ,__UpperCamelCase ): 
self.assertTrue(np.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase_ : Optional[int] = np.asarray(__UpperCamelCase ) lowercase_ : List[str] = feature_extractor(__UpperCamelCase ,return_tensors='np' ).input_features lowercase_ : Dict = feature_extractor(__UpperCamelCase ,return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase ,__UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) # Test truncation required lowercase_ : List[str] = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )] lowercase_ : Union[str, Any] = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] lowercase_ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs] lowercase_ : int = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated] lowercase_ : Tuple = feature_extractor(__UpperCamelCase ,return_tensors='np' ).input_features lowercase_ : Optional[Any] = feature_extractor(__UpperCamelCase ,return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase ,__UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' import torch lowercase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : List[str] = np.random.rand(100 ,32 ).astype(np.floataa ) lowercase_ : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase_ : Optional[Any] = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowercase_ : Union[str, Any] = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='pt' ) 
self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation' ) # automatic decoding with librispeech lowercase_ : int = ds.sort('id' ).select(range(__UpperCamelCase ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[Any] = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on lowercase_ : Optional[int] = self._load_datasamples(1 ) lowercase_ : Dict = WhisperFeatureExtractor() lowercase_ : str = feature_extractor(__UpperCamelCase ,return_tensors='pt' ).input_features self.assertEqual(input_features.shape ,(1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] ,__UpperCamelCase ,atol=1e-4 ) ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase_ : List[Any] = self._load_datasamples(1 )[0] lowercase_ : Tuple = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue lowercase_ : Tuple = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=__UpperCamelCase )[0] self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
477
1
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class A_ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=64 , snake_case=5 , snake_case=4 , snake_case=64 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope def SCREAMING_SNAKE_CASE__ ( self ): return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None lowercase = None lowercase = None if self.use_labels: lowercase = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = MPNetModel(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , snake_case ) lowercase = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = MPNetForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model( snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = MPNetForSequenceClassification(snake_case ) model.to(snake_case 
) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_choices lowercase = MPNetForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = model( snake_case , attention_mask=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = MPNetForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) = config_and_inputs lowercase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Optional[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _UpperCamelCase : Optional[Any] = ( { """feature-extraction""": MPNetModel, """fill-mask""": MPNetForMaskedLM, """question-answering""": MPNetForQuestionAnswering, """text-classification""": MPNetForSequenceClassification, 
"""token-classification""": MPNetForTokenClassification, """zero-shot""": MPNetForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Tuple = False _UpperCamelCase : List[str] = True def SCREAMING_SNAKE_CASE__ ( self ): lowercase = MPNetModelTester(self ) lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case ) @require_torch class A_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self ): lowercase = MPNetModel.from_pretrained('microsoft/mpnet-base' ) lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase = model(snake_case )[0] lowercase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , snake_case ) lowercase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1E-4 ) )
84
"""Dummy placeholders for objects that require both `torch` and `torchsde`.

Importing one of these classes always succeeds; any attempt to actually use
it raises an informative error via `requires_backends`, telling the user
which backends are missing.
"""
from ..utils import DummyObject, requires_backends


class _snake_case(metaclass=DummyObject):
    # Fixed: the obfuscated original used `metaclass=_a`, an undefined name,
    # so importing this module raised NameError. `DummyObject` (imported
    # above and otherwise unused) is the intended metaclass.
    _A: list = ["torch", "torchsde"]  # backends this dummy stands in for

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def __UpperCamelCase(cls, *args, **kwargs):
        # NOTE(review): the obfuscation gave both classmethods the same name
        # (upstream these are `from_config` / `from_pretrained`), so this
        # first definition is shadowed by the one below. Names are kept to
        # preserve the block's external interface.
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def __UpperCamelCase(cls, *args, **kwargs):  # noqa: F811 — intentionally shadows the def above
        requires_backends(cls, ["torch", "torchsde"])
143
0
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCAmelCase__ = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class __magic_name__ ( unittest.TestCase ): def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : bool , lowerCAmelCase__ : str = None , lowerCAmelCase__ : list = None ) -> List[Any]: UpperCAmelCase = None UpperCAmelCase = os.path.abspath(os.path.join("examples" , "by_feature" ) ) UpperCAmelCase = os.path.abspath("examples" ) for item in os.listdir(lowerCAmelCase__ ): if item not in EXCLUDE_EXAMPLES: UpperCAmelCase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) if os.path.isfile(lowerCAmelCase__ ) and ".py" in item_path: with self.subTest( tested_script=lowerCAmelCase__ , feature_script=lowerCAmelCase__ , tested_section="main()" if parser_only else "training_function()" , ): UpperCAmelCase = compare_against_test( os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase = "\n".join(lowerCAmelCase__ ) if special_strings is not None: for string in special_strings: UpperCAmelCase = diff.replace(lowerCAmelCase__ , "" ) self.assertEqual(lowerCAmelCase__ , "" ) def _UpperCamelCase ( self : str ) -> Dict: self.one_complete_example("complete_nlp_example.py" , lowerCAmelCase__ ) 
self.one_complete_example("complete_nlp_example.py" , lowerCAmelCase__ ) def _UpperCamelCase ( self : List[str] ) -> Optional[int]: UpperCAmelCase = os.path.abspath(os.path.join("examples" , "cv_example.py" ) ) UpperCAmelCase = [ " " * 1_6 + "{\n\n", " " * 2_0 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 2_0 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 2_0 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 2_0 + "\"epoch\": epoch,\n\n", " " * 1_6 + "},\n\n", " " * 1_6 + "step=epoch,\n", " " * 1_2, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) self.one_complete_example("complete_cv_example.py" , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class __magic_name__ ( _snake_case ): UpperCAmelCase = False @classmethod def _UpperCamelCase ( cls : int ) -> Optional[Any]: super().setUpClass() UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = os.path.join(cls._tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _UpperCamelCase ( cls : Optional[Any] ) -> str: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]: UpperCAmelCase = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) ) def _UpperCamelCase ( self : str ) -> Optional[int]: UpperCAmelCase = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() UpperCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 
"step_2" ) ) ) def _UpperCamelCase ( self : str ) -> Optional[int]: UpperCAmelCase = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ ) self.assertNotIn("epoch 0:" , lowerCAmelCase__ ) self.assertIn("epoch 1:" , lowerCAmelCase__ ) def _UpperCamelCase ( self : List[str] ) -> Optional[int]: UpperCAmelCase = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ ) if torch.cuda.is_available(): UpperCAmelCase = torch.cuda.device_count() else: UpperCAmelCase = 1 if num_processes > 1: self.assertNotIn("epoch 0:" , lowerCAmelCase__ ) self.assertIn("epoch 1:" , lowerCAmelCase__ ) else: self.assertIn("epoch 0:" , lowerCAmelCase__ ) self.assertIn("epoch 1:" , lowerCAmelCase__ ) @slow def _UpperCamelCase ( self : str ) -> Dict: UpperCAmelCase = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ): UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ ) UpperCAmelCase = re.findall("({.+})" , lowerCAmelCase__ ) UpperCAmelCase = [r for r in results if "accuracy" in r][-1] UpperCAmelCase = ast.literal_eval(lowerCAmelCase__ ) self.assertGreaterEqual(results["accuracy"] , 0.75 ) def _UpperCamelCase ( self : List[str] ) -> Any: UpperCAmelCase = ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdir: UpperCAmelCase = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() 
run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , "tracking" ) ) ) def _UpperCamelCase ( self : Optional[Any] ) -> Tuple: UpperCAmelCase = ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def _UpperCamelCase ( self : List[Any] ) -> int: UpperCAmelCase = ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
717
"""Utility that keeps diffusers' dummy-object modules in sync with the init.

Run from the root of the repo:  python utils/check_dummies.py [--fix_and_overwrite]

Fix: the obfuscated original bound every module constant to ``lowerCAmelCase__``
and every function to ``_lowerCAmelCase``; each binding shadowed the previous
one, so every internal reference (``_re_backend``, ``find_backend``,
``read_init``, ``DUMMY_CONSTANT``, ``check_dummies``, ...) raised NameError at
call time.  Distinct names are restored below.
"""
import argparse
import os
import re

# All paths are set with the intent you should run this script from the root of the repo.
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches "from xxx import bla" lines (used for single-line imports in the init).
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

# Templates for the three shapes of generated dummy object.
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Return the backend(s) mentioned in a line of the init, or None.

    Multiple backends (``is_a_available() and is_b_available()``) are joined
    with ``_and_`` to form a single key, e.g. ``"a_and_b"``.
    """
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Parse ``src/diffusers/__init__.py`` and collect backend-specific objects.

    Returns:
        dict mapping a backend key (see `find_backend`) to the list of object
        names guarded by that backend in the init.
    """
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    backend_specific_objects = {}
    while line_index < len(lines):
        # If the line contains is_backend_available, grab all objects
        # associated with the `else` block that follows.
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent (blank-ish line), add backend objects to the list.
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    # Multi-line import: each object sits on its own line,
                    # indented 8 spaces and ending with ",\n" (hence [8:-2]).
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object.

    Upper-case names become constants, lower-case names become functions,
    anything else becomes a class.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Build the full text of each ``dummy_*_objects.py`` file, keyed by backend."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()

    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        # e.g. "torch_and_scipy" -> '["torch", "scipy"]'
        backend_name = "[" + ", ".join(f"\"{b}\"" for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Compare generated dummy files against those on disk.

    Raises:
        ValueError: when a dummy file is stale and `overwrite` is False.
    """
    dummy_files = create_dummy_files()
    # Special correspondence backend -> shortcut used in the filename.
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


# Backward-compatible alias: the last obfuscated binding of `_lowerCAmelCase`
# resolved to this function.
_lowerCAmelCase = check_dummies


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
1
0
def find_minimum_change(denominations, value):
    """Greedy change-making.

    Walks the denominations from largest to smallest and repeatedly takes the
    largest coin that still fits.  This is optimal for canonical coin systems
    such as the Indian currency used in the driver below, but is not
    guaranteed minimal for arbitrary denomination sets.

    Fix: the obfuscated original declared both parameters as
    ``_UpperCamelCase`` (a SyntaxError — duplicate argument name) and its body
    referenced the undefined locals ``total_value``/``answer``.

    Args:
        denominations: coin values in ascending order (ints or int-like).
        value: amount to change, as an int or a numeric string.

    Returns:
        List of coins, largest first, summing to ``int(value)``.
    """
    total_value = int(value)
    answer = []
    for denomination in reversed(denominations):
        coin = int(denomination)
        while total_value >= coin:
            total_value -= coin
            answer.append(denomination)
    return answer


# Backward-compatible alias for the obfuscated name the original `def` used.
a__ = find_minimum_change


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
175
import os import sys import unittest a_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a_ = os.path.join(git_repo_path, """src""", """transformers""") a_ = """ {0} = None """ a_ = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ a_ = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(__UpperCAmelCase ) __lowerCamelCase = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(__UpperCAmelCase , '''tokenizers''' ) __lowerCamelCase = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(__UpperCAmelCase , '''tensorflow_text''' ) __lowerCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(__UpperCAmelCase , '''sentencepiece_and_tokenizers''' ) __lowerCamelCase = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(__UpperCAmelCase , '''sentencepiece_and_tensorflow_text''' ) __lowerCamelCase = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(__UpperCAmelCase , '''sentencepiece_and_tokenizers_and_vision''' ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __UpperCAmelCase ) 
self.assertIn('''tensorflow_text''' , __UpperCAmelCase ) self.assertIn('''sentencepiece_and_tokenizers''' , __UpperCAmelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(__UpperCAmelCase , '''\nCONSTANT = None\n''' ) __lowerCamelCase = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( __UpperCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __lowerCamelCase = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __lowerCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __lowerCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , __UpperCAmelCase )
175
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Union[str, Any] = { "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"], "tokenization_luke": ["LukeTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[int] = [ "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "LukeForMultipleChoice", "LukeForQuestionAnswering", "LukeForSequenceClassification", "LukeForTokenClassification", "LukeForMaskedLM", "LukeModel", "LukePreTrainedModel", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys A_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
696
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ : Dict = "pt" elif is_tf_available(): A_ : Union[str, Any] = "tf" else: A_ : List[str] = "jax" class a_ ( snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer lowerCamelCase__ : Optional[Any] = False def a__ (self ): '''simple docstring''' super().setUp() lowerCamelCase__ : int = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def a__ (self ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def a__ (self, **lowerCamelCase_ ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ ) def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = [] for i in range(len(lowerCamelCase_ ) ): try: lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) ) lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) ) if max_length is not None and len(lowerCamelCase_ ) > max_length: lowerCamelCase__ : int = toks[:max_length] if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0: while len(lowerCamelCase_ ) < min_length: lowerCamelCase__ : Dict = toks + toks # toks_str = [t[1] for t in 
toks] lowerCamelCase__ : int = [t[0] for t in toks] # Ensure consistency lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ ) if " " not in output_txt and len(lowerCamelCase_ ) > 1: lowerCamelCase__ : List[Any] = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ ) + ' ' + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ ) ) if with_prefix_space: lowerCamelCase__ : Optional[Any] = ' ' + output_txt lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) return output_txt, output_ids def a__ (self ): '''simple docstring''' lowerCamelCase__ : Any = self.perceiver_tokenizer lowerCamelCase__ : Union[str, Any] = 'Unicode €.' lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ ) lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'], lowerCamelCase_ ) # decoding lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' ) lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' ) lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'], lowerCamelCase_ ) # decoding lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.perceiver_tokenizer lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 
1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ ) if FRAMEWORK != "jax": lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] ) else: lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowerCamelCase_, lowerCamelCase_ ) self.assertEqual((2, 3_8), batch.input_ids.shape ) self.assertEqual((2, 3_8), batch.attention_mask.shape ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Tuple = self.perceiver_tokenizer lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids', lowerCamelCase_ ) self.assertIn('attention_mask', lowerCamelCase_ ) self.assertNotIn('decoder_input_ids', lowerCamelCase_ ) self.assertNotIn('decoder_attention_mask', lowerCamelCase_ ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.perceiver_tokenizer lowerCamelCase__ : int = [ 'Summary of the text.', 'Another summary.', ] lowerCamelCase__ : str = tokenizer( text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ ) self.assertEqual(3_2, targets['input_ids'].shape[1] ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 4_2 ) # Now let's start the test lowerCamelCase__ : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: 
with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase__ : Any = tempfile.mkdtemp() lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running' lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) tokenizer.save_pretrained(lowerCamelCase_ ) lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ ) lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_, lowerCamelCase_ ) shutil.rmtree(lowerCamelCase_ ) lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase__ : Any = tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) tokenizer.save_pretrained(lowerCamelCase_ ) lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ ) lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_, lowerCamelCase_ ) self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 4_2 ) lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length, 4_3 ) 
shutil.rmtree(lowerCamelCase_ ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file: lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file: lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ ) lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )] lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] lowerCamelCase__ : List[str] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(lowerCamelCase_, lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(lowerCamelCase_, lowerCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCamelCase__ : Dict = tokenizer_class.from_pretrained( lowerCamelCase_, ) self.assertIn( 'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )] lowerCamelCase__ : Any = tokenizer_class.from_pretrained( lowerCamelCase_, additional_special_tokens=lowerCamelCase_, ) self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ), '�' ) def a__ (self ): '''simple docstring''' pass def a__ (self ): '''simple docstring''' pass def a__ (self ): '''simple docstring''' pass def a__ (self ): '''simple docstring''' pass def a__ (self ): '''simple docstring''' lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
696
1
'''simple docstring''' import math import sys def _snake_case ( A ) -> str: lowerCAmelCase__ = '''''' try: with open(A , '''rb''' ) as binary_file: lowerCAmelCase__ = binary_file.read() for dat in data: lowerCAmelCase__ = F"""{dat:08b}""" result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def _snake_case ( A ) -> str: lowerCAmelCase__ = {'''0''': '''0''', '''1''': '''1'''} lowerCAmelCase__ , lowerCAmelCase__ = '''''', '''''' lowerCAmelCase__ = len(A ) for i in range(len(A ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue lowerCAmelCase__ = lexicon[curr_string] result += last_match_id lowerCAmelCase__ = last_match_id + '''0''' if math.loga(A ).is_integer(): lowerCAmelCase__ = {} for curr_key in list(A ): lowerCAmelCase__ = lexicon.pop(A ) lowerCAmelCase__ = new_lex lowerCAmelCase__ = last_match_id + '''1''' index += 1 lowerCAmelCase__ = '''''' return result def _snake_case ( A , A ) -> None: lowerCAmelCase__ = 8 try: with open(A , '''wb''' ) as opened_file: lowerCAmelCase__ = [ to_write[i : i + byte_length] for i in range(0 , len(A ) , A ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(A , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def _snake_case ( A ) -> str: lowerCAmelCase__ = 0 for letter in data_bits: if letter == "1": break counter += 1 lowerCAmelCase__ = data_bits[counter:] lowerCAmelCase__ = data_bits[counter + 1 :] return data_bits def _snake_case ( A , A ) -> None: lowerCAmelCase__ = read_file_binary(A ) lowerCAmelCase__ = remove_prefix(A ) lowerCAmelCase__ = decompress_data(A ) write_file_binary(A , A ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
90
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class lowercase ( unittest.TestCase ): def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> Optional[int]: _A : List[Any] = parent _A : List[Any] = batch_size _A : Dict = seq_length _A : Optional[Any] = is_training _A : int = use_attention_mask _A : int = use_token_type_ids _A : List[Any] = use_labels _A : List[str] = vocab_size _A : List[Any] = hidden_size _A : str = num_hidden_layers _A : Optional[Any] = num_attention_heads _A : List[Any] = intermediate_size _A : Any = hidden_act _A : int = hidden_dropout_prob _A : int = attention_probs_dropout_prob _A : List[str] = max_position_embeddings _A : Optional[int] = type_vocab_size _A : List[str] = type_sequence_label_size _A : Dict = initializer_range _A : List[Any] = num_choices def a__ ( self ) -> int: _A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A : Optional[Any] = None if self.use_attention_mask: _A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _A : Optional[int] = None if self.use_token_type_ids: _A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A : Optional[int] = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a__ ( self ) -> List[str]: _A : Tuple = self.prepare_config_and_inputs() _A , _A , _A , _A : str = config_and_inputs _A : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def a__ ( self ) -> int: _A : Any = self.prepare_config_and_inputs() _A , _A , _A , _A : int = config_and_inputs _A : int = True _A : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowercase ( UpperCamelCase__,unittest.TestCase ): _a = True _a = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = FlaxRobertaModelTester(self ) @slow def a__ ( self ) -> Optional[int]: for model_class_name in self.all_model_classes: _A : Optional[int] = model_class_name.from_pretrained("""roberta-base""" , from_pt=_a ) _A : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a )
307
0
'''simple docstring''' import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL A__: Dict = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : tuple ,_UpperCAmelCase : Path ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int=False ,) -> List[Any]: output_path.parent.mkdir(parents=_UpperCAmelCase ,exist_ok=_UpperCAmelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _UpperCAmelCase ,_UpperCAmelCase ,f=output_path.as_posix() ,input_names=_UpperCAmelCase ,output_names=_UpperCAmelCase ,dynamic_axes=_UpperCAmelCase ,do_constant_folding=_UpperCAmelCase ,use_external_data_format=_UpperCAmelCase ,enable_onnx_checker=_UpperCAmelCase ,opset_version=_UpperCAmelCase ,) else: export( _UpperCAmelCase ,_UpperCAmelCase ,f=output_path.as_posix() ,input_names=_UpperCAmelCase ,output_names=_UpperCAmelCase ,dynamic_axes=_UpperCAmelCase ,do_constant_folding=_UpperCAmelCase ,opset_version=_UpperCAmelCase ,) @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : bool = False ) -> List[Any]: _a : List[str] =torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _a : Tuple ="""cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _a : str ="""cpu""" _a : Any =Path(_UpperCAmelCase ) # VAE DECODER _a : Optional[Any] =AutoencoderKL.from_pretrained(model_path + """/vae""" ) _a : List[str] =vae_decoder.config.latent_channels # forward only through the decoder part _a : str 
=vae_decoder.decode onnx_export( _UpperCAmelCase ,model_args=( torch.randn(1 ,_UpperCAmelCase ,25 ,25 ).to(device=_UpperCAmelCase ,dtype=_UpperCAmelCase ), False, ) ,output_path=output_path / """vae_decoder""" / """model.onnx""" ,ordered_input_names=["""latent_sample""", """return_dict"""] ,output_names=["""sample"""] ,dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } ,opset=_UpperCAmelCase ,) del vae_decoder if __name__ == "__main__": A__: Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=14, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') A__: List[Any] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('''SD: Done: ONNX''')
713
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class A__ ( UpperCAmelCase__ ): __UpperCamelCase : jnp.ndarray @flax_register_to_config class A__ ( nn.Module , UpperCAmelCase__ , UpperCAmelCase__ ): __UpperCamelCase : int = 32 __UpperCamelCase : int = 4 __UpperCamelCase : int = 4 __UpperCamelCase : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __UpperCamelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") __UpperCamelCase : Union[bool, Tuple[bool]] = False __UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280) __UpperCamelCase : int = 2 __UpperCamelCase : Union[int, Tuple[int]] = 8 __UpperCamelCase : Optional[Union[int, Tuple[int]]] = None __UpperCamelCase : int = 1280 __UpperCamelCase : float = 0.0 __UpperCamelCase : bool = False __UpperCamelCase : jnp.dtype = jnp.floataa __UpperCamelCase : bool = True __UpperCamelCase : int = 0 __UpperCamelCase : bool = False def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' # init input tensors _a : List[str] =(1, self.in_channels, self.sample_size, self.sample_size) _a : Tuple =jnp.zeros(SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) _a : int =jnp.ones((1,) , dtype=jnp.intaa ) _a : Dict =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _a , _a : List[str] =jax.random.split(SCREAMING_SNAKE_CASE ) _a : int ={"""params""": params_rng, 
"""dropout""": dropout_rng} return self.init(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )["params"] def __UpperCAmelCase ( self :Dict ) -> str: '''simple docstring''' _a : Optional[int] =self.block_out_channels _a : Union[str, Any] =block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_a : int =self.num_attention_heads or self.attention_head_dim # input _a : Tuple =nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _a : Union[str, Any] =FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _a : Any =FlaxTimestepEmbedding(SCREAMING_SNAKE_CASE , dtype=self.dtype ) _a : Any =self.only_cross_attention if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : str =(only_cross_attention,) * len(self.down_block_types ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : Any =(num_attention_heads,) * len(self.down_block_types ) # down _a : Union[str, Any] =[] _a : Dict =block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _a : Optional[Any] =output_channel _a : str =block_out_channels[i] _a : Optional[int] =i == len(SCREAMING_SNAKE_CASE ) - 1 if down_block_type == "CrossAttnDownBlock2D": _a : List[Any] =FlaxCrossAttnDownBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _a : int =FlaxDownBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(SCREAMING_SNAKE_CASE ) _a : Optional[Any] =down_blocks # mid _a : Tuple =FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , 
use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _a : List[Any] =[] _a : Tuple =list(reversed(SCREAMING_SNAKE_CASE ) ) _a : str =list(reversed(SCREAMING_SNAKE_CASE ) ) _a : Dict =list(reversed(SCREAMING_SNAKE_CASE ) ) _a : List[str] =reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _a : Any =output_channel _a : Optional[int] =reversed_block_out_channels[i] _a : str =reversed_block_out_channels[min(i + 1 , len(SCREAMING_SNAKE_CASE ) - 1 )] _a : Optional[int] =i == len(SCREAMING_SNAKE_CASE ) - 1 if up_block_type == "CrossAttnUpBlock2D": _a : Union[str, Any] =FlaxCrossAttnUpBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _a : Tuple =FlaxUpBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(SCREAMING_SNAKE_CASE ) _a : Optional[int] =output_channel _a : Tuple =up_blocks # out _a : Any =nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 ) _a : List[str] =nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self :str , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: 
'''simple docstring''' # 1. time if not isinstance(SCREAMING_SNAKE_CASE , jnp.ndarray ): _a : Dict =jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0: _a : Optional[int] =timesteps.astype(dtype=jnp.floataa ) _a : str =jnp.expand_dims(SCREAMING_SNAKE_CASE , 0 ) _a : Any =self.time_proj(SCREAMING_SNAKE_CASE ) _a : int =self.time_embedding(SCREAMING_SNAKE_CASE ) # 2. pre-process _a : Tuple =jnp.transpose(SCREAMING_SNAKE_CASE , (0, 2, 3, 1) ) _a : Tuple =self.conv_in(SCREAMING_SNAKE_CASE ) # 3. down _a : Tuple =(sample,) for down_block in self.down_blocks: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a , _a : Any =down_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=not train ) else: _a , _a : str =down_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _a : Union[str, Any] =() for down_block_res_sample, down_block_additional_residual in zip( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _a : str =new_down_block_res_samples # 4. mid _a : List[str] =self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. 
up for up_block in self.up_blocks: _a : List[Any] =down_block_res_samples[-(self.layers_per_block + 1) :] _a : Tuple =down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _a : int =up_block( SCREAMING_SNAKE_CASE , temb=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , res_hidden_states_tuple=SCREAMING_SNAKE_CASE , deterministic=not train , ) else: _a : Dict =up_block(SCREAMING_SNAKE_CASE , temb=SCREAMING_SNAKE_CASE , res_hidden_states_tuple=SCREAMING_SNAKE_CASE , deterministic=not train ) # 6. post-process _a : str =self.conv_norm_out(SCREAMING_SNAKE_CASE ) _a : List[str] =nn.silu(SCREAMING_SNAKE_CASE ) _a : Optional[int] =self.conv_out(SCREAMING_SNAKE_CASE ) _a : Any =jnp.transpose(SCREAMING_SNAKE_CASE , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=SCREAMING_SNAKE_CASE )
506
0
"""simple docstring""" def lowerCamelCase_ ( _lowerCamelCase : int = 1_0 , _lowerCamelCase : int = 2_2 ): lowerCamelCase_ = range(1 , _lowerCamelCase ) lowerCamelCase_ = range(1 , _lowerCamelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(1_0, 2_2) = }''')
142
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase : """simple docstring""" def __init__( self , UpperCamelCase__ , UpperCamelCase__=3 , UpperCamelCase__=32 , UpperCamelCase__=3 , UpperCamelCase__=10 , UpperCamelCase__=[10, 20, 30, 40] , UpperCamelCase__=[1, 1, 2, 1] , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=3 , UpperCamelCase__=None , ) -> Dict: '''simple docstring''' lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = num_channels lowerCamelCase_ = embeddings_size lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_act lowerCamelCase_ = num_labels lowerCamelCase_ = scope lowerCamelCase_ = len(UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> int: '''simple docstring''' lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ) -> Dict: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = TFRegNetModel(config=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , training=UpperCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFRegNetForImageClassification(UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase ( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __lowercase :Union[str, Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __lowercase :Optional[int] = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) __lowercase :List[str] = False __lowercase :List[str] = False __lowercase :List[Any] = False __lowercase :Dict = False __lowercase :List[str] = False def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = TFRegNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , 
has_text_modality=UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _lowerCAmelCase ( self ) -> Any: '''simple docstring''' pass def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase__ ) lowerCamelCase_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase_ = model_class(UpperCamelCase__ ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ ) lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) 
self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase_ = layer_type lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__={} ): lowerCamelCase_ = model(UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple() def recursive_check(UpperCamelCase__ , UpperCamelCase__ ): if isinstance(UpperCamelCase__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(UpperCamelCase__ , UpperCamelCase__ ) ) , msg=( '''Tuple and dict output are not equal. 
Difference:''' F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {'''output_hidden_states''': True} ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {'''output_hidden_states''': True} ) def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFRegNetModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( ): lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class 
lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase__ , return_tensors='''tf''' ) # forward pass lowerCamelCase_ = model(**UpperCamelCase__ , training=UpperCamelCase__ ) # verify the logits lowerCamelCase_ = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase_ = tf.constant([-0.4_180, -1.5_051, -3.4_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
142
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the original bound both the logger and the archive map to the
# same name (`snake_case__`), so the logger was clobbered immediately.  The
# names are kept for compatibility; the broken `int`/`List[Any]` annotations
# (which referenced unimported typing names) are dropped.
snake_case__ = logging.get_logger(__name__)

snake_case__ = {
    """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class _a(PretrainedConfig):
    """Configuration for a GLPN (Global-Local Path Network) model.

    Instantiating with the defaults yields a configuration in the style of the
    vinvino02/glpn-kitti architecture.  The original base class was the
    undefined name `__SCREAMING_SNAKE_CASE`; `PretrainedConfig` (imported
    above) is the base that matches the `super().__init__(**kwargs)` contract.
    """

    # Architecture key consumed by the transformers auto classes; also kept
    # under the original (mangled) attribute name for backwards compatibility.
    model_type = "glpn"
    A_ = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=None,
        sr_ratios=None,
        hidden_sizes=None,
        patch_sizes=None,
        strides=None,
        num_attention_heads=None,
        mlp_ratios=None,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        """Create the configuration.

        Fixes over the original: the signature repeated a single parameter
        name eighteen times (a SyntaxError), `super().__init__(**_a)` passed
        the class object itself instead of the keyword arguments, and every
        hyper-parameter was bound to a throwaway local instead of `self`.
        List-typed defaults use the None-sentinel idiom so instances never
        share one mutable default object.
        """
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths if depths is not None else [2, 2, 2, 2]
        self.sr_ratios = sr_ratios if sr_ratios is not None else [8, 4, 2, 1]
        self.hidden_sizes = hidden_sizes if hidden_sizes is not None else [32, 64, 160, 256]
        self.patch_sizes = patch_sizes if patch_sizes is not None else [7, 3, 3, 3]
        self.strides = strides if strides is not None else [4, 2, 2, 2]
        self.mlp_ratios = mlp_ratios if mlp_ratios is not None else [4, 4, 4, 4]
        self.num_attention_heads = num_attention_heads if num_attention_heads is not None else [1, 2, 5, 8]
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
721
import os from math import logaa def _snake_case (__lowercase = "base_exp.txt"): UpperCamelCase_ = 0 UpperCamelCase_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(__lowercase) , __lowercase))): UpperCamelCase_ , UpperCamelCase_ = list(map(__lowercase , line.split(','))) if x * logaa(__lowercase) > largest: UpperCamelCase_ = x * logaa(__lowercase) UpperCamelCase_ = i + 1 return result if __name__ == "__main__": print(solution())
618
0
from manim import *


class _a(A__):
    """Manim scene that appears to animate streaming checkpoint weights
    between CPU, GPU, model and disk (in the style of the `accelerate`
    big-model-inference animations).

    NOTE(review): this block looks machine-renamed — `_UpperCAmelCase` is
    rebound on almost every line, most call arguments are the placeholder
    `_snake_case` (never defined here), and names such as `mem`, `cpu`,
    `gpu`, `model`, `fill`, `key`, `step_a`, `animations` are read but never
    bound, so the code cannot run as written.  The comments below describe
    the apparent intent only; confirm against the original animation script.
    """

    def SCREAMING_SNAKE_CASE(self):
        # Building blocks: a memory cell, a quarter-size cell, a fill swatch.
        _UpperCAmelCase = Rectangle(height=0.5, width=0.5)
        _UpperCAmelCase = Rectangle(height=0.25, width=0.25)
        _UpperCAmelCase = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU block: two columns of six cells plus a "CPU" label.
        _UpperCAmelCase = [mem.copy() for i in range(6)]
        _UpperCAmelCase = [mem.copy() for i in range(6)]
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = VGroup(_snake_case, _snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = Text("CPU", font_size=24)
        _UpperCAmelCase = Group(_snake_case, _snake_case).arrange(_snake_case, buff=0.5, aligned_edge=_snake_case)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(_snake_case)

        # GPU block: four cells plus a "GPU" label.
        _UpperCAmelCase = [mem.copy() for i in range(4)]
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = Text("GPU", font_size=24)
        _UpperCAmelCase = Group(_snake_case, _snake_case).arrange(_snake_case, buff=0.5, aligned_edge=_snake_case)
        gpu.move_to([-1, -1, 0])
        self.add(_snake_case)

        # Model block: six cells plus a "Model" label.
        _UpperCAmelCase = [mem.copy() for i in range(6)]
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = Text("Model", font_size=24)
        _UpperCAmelCase = Group(_snake_case, _snake_case).arrange(_snake_case, buff=0.5, aligned_edge=_snake_case)
        model.move_to([3, -1.0, 0])
        self.add(_snake_case)

        # Place a small "weight" swatch per model cell; the first is anchored
        # to the CPU column and the rest are chained after their predecessor.
        _UpperCAmelCase = []
        _UpperCAmelCase = []
        _UpperCAmelCase = []
        for i, rect in enumerate(_snake_case):
            rect.set_stroke(_snake_case)
            _UpperCAmelCase = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(_snake_case, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=_snake_case)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=_snake_case, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=_snake_case, buff=0.0)
            self.add(_snake_case)
            model_cpu_arr.append(_snake_case)
        self.add(*_snake_case, *_snake_case, *_snake_case)

        # Checkpoint block: six cells plus a "Loaded Checkpoint" label.
        _UpperCAmelCase = [mem.copy() for i in range(6)]
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = Text("Loaded Checkpoint", font_size=24)
        _UpperCAmelCase = Group(_snake_case, _snake_case).arrange(_snake_case, buff=0.5, aligned_edge=_snake_case)
        checkpoint.move_to([3, 0.5, 0])
        self.add(_snake_case)

        # Mirror each checkpoint cell with a filled swatch and park a copy in
        # the CPU columns (first five on the left, the rest on the right).
        _UpperCAmelCase = []
        _UpperCAmelCase = []
        for i, rect in enumerate(_snake_case):
            _UpperCAmelCase = fill.copy().set_fill(_snake_case, opacity=0.7)
            target.move_to(_snake_case)
            ckpt_arr.append(_snake_case)
            _UpperCAmelCase = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(_snake_case)
        self.add(*_snake_case, *_snake_case)

        # Legend: a key square plus markup labels for the two colours.
        _UpperCAmelCase = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        _UpperCAmelCase = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(_snake_case, _snake_case)
        _UpperCAmelCase = MarkupText(
            F"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(_snake_case, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(_snake_case)

        # Narration for the first animation step.
        _UpperCAmelCase = MarkupText(
            F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_a.move_to([2, 2, 0])

        # Disk block: two columns of six quarter-size cells plus a "Disk" label.
        _UpperCAmelCase = [meta_mem.copy() for i in range(6)]
        _UpperCAmelCase = [meta_mem.copy() for i in range(6)]
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = VGroup(*_snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = VGroup(_snake_case, _snake_case).arrange(_snake_case, buff=0)
        _UpperCAmelCase = Text("Disk", font_size=24)
        _UpperCAmelCase = Group(_snake_case, _snake_case).arrange(_snake_case, buff=0.5, aligned_edge=_snake_case)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(_snake_case, run_time=3), Write(_snake_case, run_time=1), Create(_snake_case, run_time=1))

        # Animate each checkpoint swatch shrinking onto its disk slot.
        _UpperCAmelCase = []
        for i, rect in enumerate(_snake_case):
            _UpperCAmelCase = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(_snake_case, run_time=1.5))
        self.play(*_snake_case)
        self.play(FadeOut(_snake_case))

        # Narration for the garbage-collection step, then fade everything out.
        _UpperCAmelCase = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_a.move_to([2, 2, 0])
        self.play(Write(_snake_case, run_time=3))
        self.play(
            FadeOut(_snake_case, _snake_case, *_snake_case, *_snake_case),
        )
        self.wait()
408
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# NOTE(review): the original bound the logger and the archive map to the same
# name (`snake_case__`); the names are kept, the broken `Dict`/`Any`
# annotations (unimported names) are dropped.
snake_case__ = logging.get_logger(__name__)

snake_case__ = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class _a(PretrainedConfig):
    """Configuration for a MobileNetV2 model.

    The original base class was the undefined name `A__`; `PretrainedConfig`
    (imported above) matches the `super().__init__(**kwargs)` contract.
    """

    # Architecture key consumed by the auto classes; also kept under the
    # original (mangled) attribute name for backwards compatibility.
    model_type = "mobilenet_v2"
    snake_case = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """Create the config; defaults follow google/mobilenet_v2_1.0_224.

        Fixes over the original: the signature repeated one parameter name
        fifteen times (a SyntaxError) and every value was bound to a
        throwaway local instead of being stored on `self`.

        Raises:
            ValueError: if `depth_multiplier` is not strictly positive.
        """
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetVaOnnxConfig(OnnxConfig):
    """ONNX export description for MobileNetV2.

    NOTE(review): the original reused the class name `_a` here, silently
    clobbering the configuration class above, and defined all three
    properties under one name (`SCREAMING_SNAKE_CASE`) so only the last
    survived.  The `OnnxConfig` override names are restored.
    """

    # Minimum torch version able to export this graph.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance when validating ONNX outputs against PyTorch.
        return 1e-4
408
1
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq OPT state dict and rewrite its keys to HF layout.

    Fixes over the original: the function was defined as `A` while every call
    site used `load_checkpoint`, and the checkpoint file was loaded from disk
    a second time just to read the nested "model" entry.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    # Metaseq checkpoints nest the weights under a "model" key.
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    # The original popped the old key into a dead local; store it under the
    # renamed key instead.
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            q, k, v = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a fairseq/metaseq OPT checkpoint into an `OPTModel` and save it.

    `config` may be a HF config identifier/path; when None a default
    `OPTConfig` is used.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    # The original bound the parser and the parsed args to clobbered
    # placeholder names while the code read `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
561
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


# NOTE(review): the original bound both constants to one clobbered name,
# losing the first value; descriptive names restored.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    """Convert a byte count to whole mebibytes (B -> MiB).

    The original read an undefined name `x` while its parameter had a
    different (mangled) name; the parameter is now actually used.
    """
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager recording CUDA memory used while its block runs.

    After exit: `begin`/`end`/`peak` hold raw byte counters, `used` and
    `peaked` hold MiB deltas relative to `begin`.  (The original class was
    named `a_` while the training loop instantiated `TorchTracemalloc`.)
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build GLUE/MRPC train and eval dataloaders for `model_name`.

    NOTE(review): the original called `.map` on the unbound name `datasets`;
    the loaded dataset is now bound to that name.  The eval loader reuses
    `batch_size`, matching the visible (mangled) call — confirm against the
    upstream example if EVAL_BATCH_SIZE was intended.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        '''glue''', '''mrpc''', split={'''train''': f"train[:{n_train}]", '''validation''': f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train for `config['num_epochs']` epochs while tracking peak CUDA memory.

    Per-epoch peak usage is printed, optionally asserted against
    `args.peak_memory_upper_bound`, and dumped to
    `<output_dir>/peak_memory_utilization.json` on the main process.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a DeepSpeed-configured optimizer must be a DummyOptim.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin)))
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used))
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked))
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        # The original computed this value into a dead local, so the assert and
        # the JSON dump below always saw an empty dict; record it in the dict.
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''peak_memory_utilization.json'''), '''w''') as f:
            json.dump(train_total_peak_memory, f)


def main():
    """Parse CLI arguments and launch the memory-tracking training run."""
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=False,
    )
    parser.add_argument(
        '''--output_dir''',
        type=str,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''',
    )
    parser.add_argument(
        '''--peak_memory_upper_bound''',
        type=float,
        default=None,
        help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''',
    )
    parser.add_argument(
        '''--n_train''',
        type=int,
        default=320,
        help='''Number of training examples to use.''',
    )
    parser.add_argument(
        '''--n_val''',
        type=int,
        default=160,
        help='''Number of validation examples to use.''',
    )
    parser.add_argument(
        '''--num_epochs''',
        type=int,
        default=1,
        help='''Number of train epochs.''',
    )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
561
1
import unittest

from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the SentencePiece vocabulary fixture shared by every test below.
UpperCamelCase = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_(lowerCAmelCase_, unittest.TestCase):
    """Tokenizer test-suite for DeBERTa-v2 (slow and fast implementations).

    NOTE(review): this block looks machine-renamed and cannot run as written —
    the class inherits from itself (presumably `TokenizerTesterMixin` was
    intended), every test method is named `__a` so later definitions clobber
    earlier ones, and method bodies assign to `UpperCamelCase__` while reading
    never-bound names such as `__lowerCamelCase`, `tokenizer`,
    `rust_tokenizer`, `input_text`, `vocab_keys`, `text`/`text_a`.  Comments
    describe the apparent intent only; the code is left byte-identical.
    """

    _snake_case : Optional[Any] = DebertaVaTokenizer
    _snake_case : Optional[int] = DebertaVaTokenizerFast
    _snake_case : List[Any] = True
    _snake_case : Dict = True

    def __a(self :List[str]):
        # Set-up: build a slow tokenizer from the fixture and serialise it.
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCamelCase__ :Optional[Any] = DebertaVaTokenizer(__lowerCamelCase, unk_token="""<unk>""")
        tokenizer.save_pretrained(self.tmpdirname)

    def __a(self :Tuple, lowerCamelCase__ :Dict):
        # Provide an (input, expected-output) text pair for round-trip tests.
        UpperCamelCase__ :Tuple = 'this is a test'
        UpperCamelCase__ :Optional[Any] = 'this is a test'
        return input_text, output_text

    def __a(self :Optional[int]):
        # '<pad>' should map to id 0 and back.
        UpperCamelCase__ :int = '<pad>'
        UpperCamelCase__ :Optional[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase), __lowerCamelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase), __lowerCamelCase)

    def __a(self :Optional[int]):
        # Spot-check first/last vocabulary entries and the vocabulary length.
        UpperCamelCase__ :Any = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """<unk>""")
        self.assertEqual(vocab_keys[-1], """[PAD]""")
        self.assertEqual(len(__lowerCamelCase), 3_00_01)

    def __a(self :Optional[Any]):
        # vocab_size excludes the extra [PAD] entry counted above.
        self.assertEqual(self.get_tokenizer().vocab_size, 3_00_00)

    def __a(self :List[Any]):
        # Lower-casing of mixed-case, whitespace-heavy input.
        # fmt: off
        UpperCamelCase__ :List[Any] = ' \tHeLLo!how \n Are yoU? '
        UpperCamelCase__ :str = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        UpperCamelCase__ :str = DebertaVaTokenizer(__lowerCamelCase, do_lower_case=__lowerCamelCase)
        UpperCamelCase__ :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase, do_lower_case=__lowerCamelCase)
        UpperCamelCase__ :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""")
    def __a(self :List[Any]):
        pass

    @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""")
    def __a(self :str):
        pass

    def __a(self :List[str]):
        # split_by_punct: punctuation isolated into its own pieces.
        # fmt: off
        UpperCamelCase__ :int = 'I was born in 92000, and this is falsé.'
        UpperCamelCase__ :Optional[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        UpperCamelCase__ :str = DebertaVaTokenizer(__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Any = DebertaVaTokenizerFast(__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :Any):
        # do_lower_case combined with split_by_punct.
        # fmt: off
        UpperCamelCase__ :Dict = 'I was born in 92000, and this is falsé.'
        UpperCamelCase__ :Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        UpperCamelCase__ :Union[str, Any] = DebertaVaTokenizer(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :Optional[int]):
        # do_lower_case with punctuation splitting disabled (no bare '▁' pieces).
        # fmt: off
        UpperCamelCase__ :Dict = 'I was born in 92000, and this is falsé.'
        UpperCamelCase__ :Union[str, Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        UpperCamelCase__ :Union[str, Any] = DebertaVaTokenizer(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Optional[Any] = DebertaVaTokenizerFast(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :Any):
        # Punctuation splitting without lower-casing.
        # fmt: off
        UpperCamelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.'
        UpperCamelCase__ :int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        UpperCamelCase__ :Union[str, Any] = DebertaVaTokenizer(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :List[Any]):
        # Whitespace/unknown-character handling without lower-casing.
        # fmt: off
        UpperCamelCase__ :List[str] = ' \tHeLLo!how \n Are yoU? '
        UpperCamelCase__ :Union[str, Any] = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        UpperCamelCase__ :str = DebertaVaTokenizer(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase, do_lower_case=__lowerCamelCase, split_by_punct=__lowerCamelCase)
        UpperCamelCase__ :Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :str):
        # Slow and fast tokenizers must agree on tokens and on raw ids.
        UpperCamelCase__ :Any = self.get_tokenizer()
        UpperCamelCase__ :Optional[int] = self.get_rust_tokenizer()

        UpperCamelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.'

        UpperCamelCase__ :Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        UpperCamelCase__ :Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase))
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :List[str] = tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase)
        UpperCamelCase__ :Optional[Any] = rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :Optional[int] = self.get_rust_tokenizer()
        UpperCamelCase__ :Union[str, Any] = tokenizer.encode(__lowerCamelCase)
        UpperCamelCase__ :Dict = rust_tokenizer.encode(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :Any):
        # Full pipeline (encode / tokenize / convert_ids_to_tokens) for both
        # implementations, with and without accented characters in the input.
        UpperCamelCase__ :Dict = 'This is a test'
        UpperCamelCase__ :str = [13, 1, 43_98, 25, 21, 12_89]
        UpperCamelCase__ :Union[str, Any] = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        UpperCamelCase__ :Any = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']

        UpperCamelCase__ :Union[str, Any] = DebertaVaTokenizer(__lowerCamelCase, keep_accents=__lowerCamelCase)
        UpperCamelCase__ :List[Any] = DebertaVaTokenizerFast(__lowerCamelCase, keep_accents=__lowerCamelCase)

        UpperCamelCase__ :Dict = tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :str = tokenizer.tokenize(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :List[str] = tokenizer.convert_ids_to_tokens(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :List[str] = rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :str = rust_tokenizer.tokenize(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :List[Any] = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        # fmt: off
        UpperCamelCase__ :List[Any] = 'I was born in 92000, and this is falsé.'
        UpperCamelCase__ :Optional[Any] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
        UpperCamelCase__ :str = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        UpperCamelCase__ :Any = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        UpperCamelCase__ :int = tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :Dict = tokenizer.tokenize(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :Dict = tokenizer.convert_ids_to_tokens(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

        UpperCamelCase__ :str = rust_tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :Optional[Any] = rust_tokenizer.tokenize(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)
        UpperCamelCase__ :Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase)

    def __a(self :int):
        # build_inputs_with_special_tokens adds [CLS]/[SEP] for one and two sequences.
        UpperCamelCase__ :List[str] = DebertaVaTokenizer(__lowerCamelCase)
        UpperCamelCase__ :Dict = tokenizer.encode("""sequence builders""")
        UpperCamelCase__ :Any = tokenizer.encode("""multi-sequence build""")
        UpperCamelCase__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase)
        UpperCamelCase__ :List[str] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase, __lowerCamelCase)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], __lowerCamelCase)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            __lowerCamelCase,
        )

    @slow
    def __a(self :Union[str, Any]):
        # Golden integration fixture for microsoft/deberta-v2-xlarge
        # (three sequences padded to length 84).
        # fmt: off
        UpperCamelCase__ :List[str] = {'input_ids': [
            [1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7,
             3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87,
             3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99,
             6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94,
             36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53,
             19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37,
             3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2],
            [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34,
             10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46,
             44_85, 4, 2,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0]], 'token_type_ids': [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0]], 'attention_mask': [
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase,
            model_name="""microsoft/deberta-v2-xlarge""",
            revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""",
        )
45
"""Sort a list by repeatedly extracting its minimum and maximum."""


def merge_sort(collection: list) -> list:
    """Return a new list with the elements of *collection* in ascending order.

    Works by repeatedly pulling the current minimum and maximum out of the
    remaining elements, growing the sorted result from both ends
    (a "min-max selection" strategy, despite the historical name).
    The caller's list is left unmodified.

    >>> merge_sort([5, 3, 1, 4, 2])
    [1, 2, 3, 4, 5]
    >>> merge_sort([])
    []
    """
    # Work on a copy so the caller's list is not mutated as a side effect.
    remaining = list(collection)
    start, end = [], []
    while len(remaining) > 1:
        low, high = min(remaining), max(remaining)
        start.append(low)
        end.append(high)
        remaining.remove(low)
        remaining.remove(high)
    end.reverse()
    # At most one element is left in `remaining` (odd-length input).
    return start + remaining + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
270
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class __SCREAMING_SNAKE_CASE : def __init__( self :Any ,__UpperCAmelCase :Any ) -> str: """simple docstring""" lowerCamelCase__ : Any = data lowerCamelCase__ : Node | None = None class __SCREAMING_SNAKE_CASE : def __init__( self :Any ) -> str: """simple docstring""" lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : Optional[int] = None def __iter__( self :Dict ) -> Iterator[Any]: """simple docstring""" lowerCamelCase__ : List[Any] = self.head while self.head: yield node.data lowerCamelCase__ : int = node.next if node == self.head: break def __len__( self :Union[str, Any] ) -> int: """simple docstring""" return sum(1 for _ in self ) def __repr__( self :Any ) -> List[Any]: """simple docstring""" return "->".join(str(__UpperCAmelCase ) for item in iter(self ) ) def lowercase_ ( self :List[str] ,__UpperCAmelCase :Any ) -> None: """simple docstring""" self.insert_nth(len(self ) ,__UpperCAmelCase ) def lowercase_ ( self :List[str] ,__UpperCAmelCase :Any ) -> None: """simple docstring""" self.insert_nth(0 ,__UpperCAmelCase ) def lowercase_ ( self :Dict ,__UpperCAmelCase :int ,__UpperCAmelCase :Any ) -> None: """simple docstring""" if index < 0 or index > len(self ): raise IndexError('''list index out of range.''' ) lowerCamelCase__ : Dict = Node(__UpperCAmelCase ) if self.head is None: lowerCamelCase__ : Any = new_node # first node points itself lowerCamelCase__ : Union[str, Any] = new_node elif index == 0: # insert at head lowerCamelCase__ : int = self.head lowerCamelCase__ : Optional[int] = new_node else: lowerCamelCase__ : int = self.head for _ in range(index - 1 ): lowerCamelCase__ : Optional[Any] = temp.next lowerCamelCase__ : Union[str, Any] = temp.next lowerCamelCase__ : int = new_node if index == len(self ) - 1: # insert at tail lowerCamelCase__ : Tuple = new_node def lowercase_ ( self :str ) -> str: """simple docstring""" return 
self.delete_nth(0 ) def lowercase_ ( self :Optional[int] ) -> Any: """simple docstring""" return self.delete_nth(len(self ) - 1 ) def lowercase_ ( self :Any ,__UpperCAmelCase :int = 0 ) -> Any: """simple docstring""" if not 0 <= index < len(self ): raise IndexError('''list index out of range.''' ) lowerCamelCase__ : Union[str, Any] = self.head if self.head == self.tail: # just one node lowerCamelCase__ : List[str] = None elif index == 0: # delete head node lowerCamelCase__ : Tuple = self.tail.next.next lowerCamelCase__ : Any = self.head.next else: lowerCamelCase__ : List[Any] = self.head for _ in range(index - 1 ): lowerCamelCase__ : Union[str, Any] = temp.next lowerCamelCase__ : List[str] = temp.next lowerCamelCase__ : Any = temp.next.next if index == len(self ) - 1: # delete at tail lowerCamelCase__ : List[Any] = temp return delete_node.data def lowercase_ ( self :Optional[Any] ) -> bool: """simple docstring""" return len(self ) == 0 def __a ( ): """simple docstring""" lowerCamelCase__ : int = CircularLinkedList() assert len(_lowercase ) == 0 assert circular_linked_list.is_empty() is True assert str(_lowercase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(_lowercase ) == i circular_linked_list.insert_nth(_lowercase , i + 1 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert 
str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
121
"""simple docstring""" import requests UpperCAmelCase : str = "YOUR API KEY" def __a ( _lowercase , _lowercase = giphy_api_key ): """simple docstring""" lowerCamelCase__ : Optional[int] = '''+'''.join(query.split() ) lowerCamelCase__ : Optional[int] = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}""" lowerCamelCase__ : Dict = requests.get(_lowercase ).json()['''data'''] return [gif["url"] for gif in gifs] if __name__ == "__main__": print("\n".join(get_gifs("space ship")))
121
1
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowercase (_lowerCAmelCase , _lowerCAmelCase ): # Load checkpoint __lowerCAmelCase = torch.load(_lowerCAmelCase , map_location="""cpu""" ) __lowerCAmelCase = chkpt["""model"""] # We have the base model one level deeper than the original XLM repository __lowerCAmelCase = {} for k, v in state_dict.items(): if "pred_layer" in k: __lowerCAmelCase = v else: __lowerCAmelCase = v __lowerCAmelCase = chkpt["""params"""] __lowerCAmelCase = {n: v for n, v in config.items() if not isinstance(_lowerCAmelCase , (torch.FloatTensor, numpy.ndarray) )} __lowerCAmelCase = chkpt["""dico_word2id"""] __lowerCAmelCase = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()} # Save pytorch-model __lowerCAmelCase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME __lowerCAmelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME __lowerCAmelCase = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""] print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(_lowerCAmelCase , _lowerCAmelCase ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(_lowerCAmelCase , indent=2 ) + """\n""" ) print(f"""Save vocab file to {pytorch_config_dump_path}""" ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(_lowerCAmelCase , indent=2 ) + """\n""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( 
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
465
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = '''ybelkada/fonts''' def lowercase (): if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ """Pix2StructImageProcessor. 
Please upgrade torch.""" ) def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): requires_backends(_lowerCAmelCase , ["""torch"""] ) _check_torch_version() __lowerCAmelCase = image_tensor.unsqueeze(0 ) __lowerCAmelCase = torch.nn.functional.unfold(_lowerCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) __lowerCAmelCase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , _lowerCAmelCase , _lowerCAmelCase , -1 ) __lowerCAmelCase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def lowercase (_lowerCAmelCase , _lowerCAmelCase = 36 , _lowerCAmelCase = "black" , _lowerCAmelCase = "white" , _lowerCAmelCase = 5 , _lowerCAmelCase = 5 , _lowerCAmelCase = 5 , _lowerCAmelCase = 5 , _lowerCAmelCase = None , _lowerCAmelCase = None , ): requires_backends(_lowerCAmelCase , """vision""" ) # Add new lines so that each line is no more than 80 characters. __lowerCAmelCase = textwrap.TextWrapper(width=80 ) __lowerCAmelCase = wrapper.wrap(text=_lowerCAmelCase ) __lowerCAmelCase = """\n""".join(_lowerCAmelCase ) if font_bytes is not None and font_path is None: __lowerCAmelCase = io.BytesIO(_lowerCAmelCase ) elif font_path is not None: __lowerCAmelCase = font_path else: __lowerCAmelCase = hf_hub_download(_lowerCAmelCase , """Arial.TTF""" ) __lowerCAmelCase = ImageFont.truetype(_lowerCAmelCase , encoding="""UTF-8""" , size=_lowerCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. __lowerCAmelCase = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , _lowerCAmelCase ) ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = temp_draw.textbbox((0, 0) , _lowerCAmelCase , _lowerCAmelCase ) # Create the actual image with a bit of padding around the text. 
__lowerCAmelCase = text_width + left_padding + right_padding __lowerCAmelCase = text_height + top_padding + bottom_padding __lowerCAmelCase = Image.new("""RGB""" , (image_width, image_height) , _lowerCAmelCase ) __lowerCAmelCase = ImageDraw.Draw(_lowerCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=_lowerCAmelCase , fill=_lowerCAmelCase , font=_lowerCAmelCase ) return image def lowercase (_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): requires_backends(_lowerCAmelCase , """vision""" ) # Convert to PIL image if necessary __lowerCAmelCase = to_pil_image(_lowerCAmelCase ) __lowerCAmelCase = render_text(_lowerCAmelCase , **_lowerCAmelCase ) __lowerCAmelCase = max(header_image.width , image.width ) __lowerCAmelCase = int(image.height * (new_width / image.width) ) __lowerCAmelCase = int(header_image.height * (new_width / header_image.width) ) __lowerCAmelCase = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary __lowerCAmelCase = to_numpy_array(_lowerCAmelCase ) if infer_channel_dimension_format(_lowerCAmelCase ) == ChannelDimension.LAST: __lowerCAmelCase = to_channel_dimension_format(_lowerCAmelCase , ChannelDimension.LAST ) return new_image class lowerCAmelCase_ ( A__ ): '''simple docstring''' _snake_case = ['''flattened_patches'''] def __init__( self , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = 2_048 , snake_case_ = False , **snake_case_ , ) -> None: super().__init__(**snake_case_ ) __lowerCAmelCase = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} __lowerCAmelCase = do_normalize __lowerCAmelCase = do_convert_rgb __lowerCAmelCase = max_patches __lowerCAmelCase = is_vqa def A__ ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ 
) -> np.ndarray: requires_backends(self.extract_flattened_patches , """torch""" ) _check_torch_version() # convert to torch __lowerCAmelCase = to_channel_dimension_format(snake_case_ , ChannelDimension.FIRST ) __lowerCAmelCase = torch.from_numpy(snake_case_ ) __lowerCAmelCase , __lowerCAmelCase = patch_size["""height"""], patch_size["""width"""] __lowerCAmelCase , __lowerCAmelCase = get_image_size(snake_case_ ) # maximize scale s.t. __lowerCAmelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) __lowerCAmelCase = max(min(math.floor(scale * image_height / patch_height ) , snake_case_ ) , 1 ) __lowerCAmelCase = max(min(math.floor(scale * image_width / patch_width ) , snake_case_ ) , 1 ) __lowerCAmelCase = max(num_feasible_rows * patch_height , 1 ) __lowerCAmelCase = max(num_feasible_cols * patch_width , 1 ) __lowerCAmelCase = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=snake_case_ , antialias=snake_case_ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] __lowerCAmelCase = torch_extract_patches(snake_case_ , snake_case_ , snake_case_ ) __lowerCAmelCase = patches.shape __lowerCAmelCase = patches_shape[1] __lowerCAmelCase = patches_shape[2] __lowerCAmelCase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] __lowerCAmelCase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] __lowerCAmelCase = torch.arange(snake_case_ ).reshape([rows, 1] ).repeat(1 , snake_case_ ).reshape([rows * columns, 1] ) __lowerCAmelCase = torch.arange(snake_case_ ).reshape([1, columns] ).repeat(snake_case_ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] __lowerCAmelCase = row_ids.to(torch.floataa ) __lowerCAmelCase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] __lowerCAmelCase = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] __lowerCAmelCase = torch.nn.functional.pad(snake_case_ , [0, 0, 0, max_patches - (rows * columns)] ).float() __lowerCAmelCase = to_numpy_array(snake_case_ ) return result def A__ ( self , snake_case_ , snake_case_ = None , **snake_case_ ) -> np.ndarray: if image.dtype == np.uinta: __lowerCAmelCase = image.astype(np.floataa ) # take mean across the whole `image` __lowerCAmelCase = np.mean(snake_case_ ) __lowerCAmelCase = np.std(snake_case_ ) __lowerCAmelCase = max(snake_case_ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , **snake_case_ ) def A__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> ImageInput: __lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCAmelCase = patch_size if patch_size is not None else self.patch_size __lowerCAmelCase = max_patches if max_patches is not None else self.max_patches __lowerCAmelCase = self.is_vqa if kwargs.get("""data_format""" , snake_case_ ) is not None: raise ValueError("""data_format is not an accepted input as the outputs are """ ) __lowerCAmelCase = make_list_of_images(snake_case_ ) if not valid_images(snake_case_ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCAmelCase = [convert_to_rgb(snake_case_ ) for image in images] # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(snake_case_ ) for image in images] if is_vqa: if header_text is None: raise ValueError("""A header text must be provided for VQA models.""" ) __lowerCAmelCase = kwargs.pop("""font_bytes""" , snake_case_ ) __lowerCAmelCase = kwargs.pop("""font_path""" , snake_case_ ) if isinstance(snake_case_ , snake_case_ ): __lowerCAmelCase = [header_text] * len(snake_case_ ) __lowerCAmelCase = [ render_header(snake_case_ , header_text[i] , font_bytes=snake_case_ , font_path=snake_case_ ) for i, image in enumerate(snake_case_ ) ] if do_normalize: __lowerCAmelCase = [self.normalize(image=snake_case_ ) for image in images] # convert to torch tensor and permute __lowerCAmelCase = [ self.extract_flattened_patches(image=snake_case_ , max_patches=snake_case_ , patch_size=snake_case_ ) for image in images ] # create attention mask in numpy __lowerCAmelCase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] __lowerCAmelCase = BatchFeature( data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=snake_case_ ) return encoded_outputs
465
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__( lowerCamelCase__ , unittest.TestCase ): lowercase__ = DanceDiffusionPipeline lowercase__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS lowercase__ = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } lowercase__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS lowercase__ = False lowercase__ = False def lowercase_ ( self : str ): torch.manual_seed(0 ) a : Union[str, Any] = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__snake_case , use_timestep_embedding=__snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) a : Optional[int] = IPNDMScheduler() a : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, } return components def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : Optional[Any]=0 ): if str(__snake_case ).startswith('mps' ): a : List[str] = torch.manual_seed(__snake_case ) else: a : List[str] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Optional[int] = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def lowercase_ ( self : Any ): a : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator a : 
int = self.get_dummy_components() a : Dict = DanceDiffusionPipeline(**__snake_case ) a : Any = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : Any = self.get_dummy_inputs(__snake_case ) a : List[str] = pipe(**__snake_case ) a : Optional[Any] = output.audios a : Optional[Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) a : List[str] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def lowercase_ ( self : Optional[int] ): return super().test_save_load_local() @skip_mps def lowercase_ ( self : str ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def lowercase_ ( self : Optional[int] ): return super().test_save_load_optional_components() @skip_mps def lowercase_ ( self : Dict ): return super().test_attention_slicing_forward_pass() def lowercase_ ( self : Optional[Any] ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : str ): a : Union[str, Any] = torch_device a : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) a : Union[str, Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : str = torch.manual_seed(0 ) a : Any = pipe(generator=__snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) a : str = output.audios a : Tuple = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a : Optional[int] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : str ): a : Dict = torch_device a : List[str] = 
DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) a : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : Optional[Any] = torch.manual_seed(0 ) a : Dict = pipe(generator=__snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 ) a : Union[str, Any] = output.audios a : Optional[Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a : Any = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
195
"""Deprecated alias module for the Chinese-CLIP image processor."""

import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor

logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """Backward-compatibility shim: behaves exactly like
    `ChineseCLIPImageProcessor` but warns on instantiation."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation notice, then defer entirely to the new class.
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
195
1
"""Implementation of a two-input OR logic gate."""


def or_gate(input_a: int, input_b: int) -> int:
    """Return 1 if at least one of the two binary inputs is 1, else 0.

    >>> or_gate(0, 0)
    0
    >>> or_gate(1, 0)
    1
    """
    # Count how many inputs equal 1; any non-zero count means the gate fires.
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively verify the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
41
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def __A(*args, take_from=None, standard_warn=True, stacklevel=2) -> Dict:
    """Deprecation helper (diffusers-style `deprecate`).

    Each positional argument is an ``(attribute, version_name, message)``
    tuple. For every tuple: raise ``ValueError`` if the library version has
    already reached ``version_name``; otherwise warn, and — when *take_from*
    is a kwargs dict or an object — pop/read the deprecated value so it can
    be returned to the caller.

    Returns:
        Nothing when no deprecated value was consumed, the single value when
        one was, or a tuple of values when several were.

    Raises:
        ValueError: a deprecation is past due and should be removed.
        TypeError: *take_from* is a dict that still contains unknown keys.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single un-nested (attribute, version, message) call form.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            # `standard_warn` keeps the generated sentence before the custom message.
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Leftover keys mean the caller passed something we never declared.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
468
0
"""Dataset reader that builds a `Dataset` from plain-text files."""

from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    """Read text file(s) into a `Dataset` via the packaged `Text` builder."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to the {split: paths} mapping the builder expects.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
721
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Minimal handler used to exercise KwargsHandler.to_kwargs().
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # Only fields that differ from their defaults appear in to_kwargs().
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this very file under torchrun; the __main__ block below
        # performs the actual DDP kwargs checks on the prepared model.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
184
0
import requests
from bs4 import BeautifulSoup  # fixed: the package is "bs4", not "bsa"


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers.info and return COVID-19 statistics for the world.

    Returns a dict mapping each statistic heading (e.g. "Coronavirus Cases:")
    to its displayed value, both as stripped strings.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Headings come from the <h1> tags plus the panel titles; values come from
    # the matching counter <div>s. The two lists are paired positionally.
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
234
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer
    behind a single processor interface."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Plain calls default to audio processing; as_target_processor()
        # temporarily swaps in the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Dispatch `audio` to the feature extractor and `text` to the tokenizer.

        Returns the feature-extractor output when only audio is given, the
        tokenizer output when only text is given, and the audio features with
        the tokenized text attached as ``labels`` when both are given.

        Raises:
            ValueError: if neither `audio` nor `text` is supplied.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # A positional first argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route plain ``__call__`` invocations to the tokenizer
        for label processing (deprecated)."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
234
1
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


# Shared SentencePiece fixture used by every test below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Write a tiny monolingual vocab file next to the SentencePiece model.
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved in setUp with the special-tokens map applied."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # "là" is outside the monolingual vocab, so it round-trips as <unk><unk>.
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        # Out-of-vocab pieces ("▁l", "à") and the explicit unk all map to id 3.
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
116
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = "summarization" __snake_case : List[str] = ["loss"] __snake_case : Optional[int] = ROUGE_KEYS __snake_case : List[Any] = "rouge2" def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : List[Any] ) -> str: '''simple docstring''' if hparams.sortish_sampler and hparams.gpus > 1: SCREAMING_SNAKE_CASE = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(lowerCamelCase__ ,num_labels=lowerCamelCase__ ,mode=self.mode ,**lowerCamelCase__ ) use_task_specific_params(self.model ,"""summarization""" ) save_git_info(self.hparams.output_dir ) 
SCREAMING_SNAKE_CASE = Path(self.output_dir ) / """metrics.json""" SCREAMING_SNAKE_CASE = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams ,self.hparams_save_path ) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.config.model_type SCREAMING_SNAKE_CASE = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size SCREAMING_SNAKE_CASE = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } SCREAMING_SNAKE_CASE = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } SCREAMING_SNAKE_CASE = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} SCREAMING_SNAKE_CASE = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) SCREAMING_SNAKE_CASE = get_git_info()["""repo_sha"""] SCREAMING_SNAKE_CASE = hparams.num_workers SCREAMING_SNAKE_CASE = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = self.tokenizer.lang_code_to_id[hparams.tgt_lang] SCREAMING_SNAKE_CASE = self.decoder_start_token_id SCREAMING_SNAKE_CASE = ( SeqaSeqDataset if hasattr(self.tokenizer ,"""prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if 
self.hparams.eval_max_gen_length is not None: SCREAMING_SNAKE_CASE = self.hparams.eval_max_gen_length else: SCREAMING_SNAKE_CASE = self.model.config.max_length SCREAMING_SNAKE_CASE = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]: '''simple docstring''' SCREAMING_SNAKE_CASE = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(lowerCamelCase__ ,Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / """tok_batch.json""" ) SCREAMING_SNAKE_CASE = True return readable_batch def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : str ,**lowerCamelCase__ : Union[str, Any] ) -> int: '''simple docstring''' return self.model(lowerCamelCase__ ,**lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : List[int] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode( lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ ) return lmap(str.strip ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : dict ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.tokenizer.pad_token_id SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = batch["""input_ids"""], batch["""attention_mask"""] SCREAMING_SNAKE_CASE = batch["""labels"""] if isinstance(self.model ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = self.model._shift_right(lowerCamelCase__ ) else: SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCamelCase__ ,lowerCamelCase__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero SCREAMING_SNAKE_CASE = decoder_input_ids self.save_readable_batch(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 
self(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,decoder_input_ids=lowerCamelCase__ ,use_cache=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id SCREAMING_SNAKE_CASE = nn.CrossEntropyLoss(ignore_index=lowerCamelCase__ ) assert lm_logits.shape[-1] == self.vocab_size SCREAMING_SNAKE_CASE = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) ) else: SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCamelCase__ ,dim=-1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = label_smoothed_nll_loss( lowerCamelCase__ ,lowerCamelCase__ ,self.hparams.label_smoothing ,ignore_index=lowerCamelCase__ ) return (loss,) @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int: '''simple docstring''' return self.tokenizer.pad_token_id def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self._step(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = dict(zip(self.loss_names ,lowerCamelCase__ ) ) # tokens per batch SCREAMING_SNAKE_CASE = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() SCREAMING_SNAKE_CASE = batch["""input_ids"""].shape[0] SCREAMING_SNAKE_CASE = batch["""input_ids"""].eq(self.pad ).sum() SCREAMING_SNAKE_CASE = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[int] ) -> Dict: '''simple docstring''' return self._generative_step(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[str]="val" ) -> Dict: '''simple docstring''' self.step_count += 1 SCREAMING_SNAKE_CASE = {k: torch.stack([x[k] for x in outputs] ).mean() for k 
in self.loss_names} SCREAMING_SNAKE_CASE = losses["""loss"""] SCREAMING_SNAKE_CASE = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } SCREAMING_SNAKE_CASE = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase__ ).type_as(lowerCamelCase__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()} SCREAMING_SNAKE_CASE = self.step_count self.metrics[prefix].append(lowerCamelCase__ ) # callback writes this to self.metrics_save_path SCREAMING_SNAKE_CASE = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, F"""{prefix}_loss""": loss, F"""{prefix}_{self.val_metric}""": metric_tensor, } def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' return calculate_rouge(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : dict ) -> dict: '''simple docstring''' SCREAMING_SNAKE_CASE = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') SCREAMING_SNAKE_CASE = self.model.generate( batch["""input_ids"""] ,attention_mask=batch["""attention_mask"""] ,use_cache=lowerCamelCase__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,) SCREAMING_SNAKE_CASE = (time.time() - ta) / batch["""input_ids"""].shape[0] SCREAMING_SNAKE_CASE = self.ids_to_clean_text(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.ids_to_clean_text(batch["""labels"""] ) SCREAMING_SNAKE_CASE = self._step(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = dict(zip(self.loss_names ,lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = 
self.calc_generative_metrics(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.mean(lmap(lowerCamelCase__ ,lowerCamelCase__ ) ) base_metrics.update(gen_time=lowerCamelCase__ ,gen_len=lowerCamelCase__ ,preds=lowerCamelCase__ ,target=lowerCamelCase__ ,**lowerCamelCase__ ) return base_metrics def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : List[Any] ) -> Any: '''simple docstring''' return self._generative_step(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.validation_epoch_end(lowerCamelCase__ ,prefix="""test""" ) def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Tuple ) -> SeqaSeqDataset: '''simple docstring''' SCREAMING_SNAKE_CASE = self.n_obs[type_path] SCREAMING_SNAKE_CASE = self.target_lens[type_path] SCREAMING_SNAKE_CASE = self.dataset_class( self.tokenizer ,type_path=lowerCamelCase__ ,n_obs=lowerCamelCase__ ,max_target_length=lowerCamelCase__ ,**self.dataset_kwargs ,) return dataset def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : bool = False ) -> DataLoader: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dataset(lowerCamelCase__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": SCREAMING_SNAKE_CASE = dataset.make_sortish_sampler(lowerCamelCase__ ,distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCamelCase__ ,num_workers=self.num_workers ,sampler=lowerCamelCase__ ,) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": SCREAMING_SNAKE_CASE = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 ) return DataLoader( lowerCamelCase__ ,batch_sampler=lowerCamelCase__ ,collate_fn=dataset.collate_fn 
,num_workers=self.num_workers ,) else: return DataLoader( lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=dataset.collate_fn ,shuffle=lowerCamelCase__ ,num_workers=self.num_workers ,sampler=lowerCamelCase__ ,) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> DataLoader: '''simple docstring''' SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" ,batch_size=self.hparams.train_batch_size ,shuffle=lowerCamelCase__ ) return dataloader def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> DataLoader: '''simple docstring''' return self.get_dataloader("""val""" ,batch_size=self.hparams.eval_batch_size ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> DataLoader: '''simple docstring''' return self.get_dataloader("""test""" ,batch_size=self.hparams.eval_batch_size ) @staticmethod def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ) -> List[Any]: '''simple docstring''' BaseTransformer.add_model_specific_args(lowerCamelCase__ ,lowerCamelCase__ ) add_generic_args(lowerCamelCase__ ,lowerCamelCase__ ) parser.add_argument( """--max_source_length""" ,default=1024 ,type=lowerCamelCase__ ,help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) ,) parser.add_argument( """--max_target_length""" ,default=56 ,type=lowerCamelCase__ ,help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) ,) parser.add_argument( """--val_max_target_length""" ,default=142 ,type=lowerCamelCase__ ,help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) ,) parser.add_argument( """--test_max_target_length""" ,default=142 ,type=lowerCamelCase__ ,help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) ,) parser.add_argument("""--freeze_encoder""" ,action="""store_true""" ) parser.add_argument("""--freeze_embeds""" ,action="""store_true""" ) parser.add_argument("""--sortish_sampler""" ,action="""store_true""" ,default=lowerCamelCase__ ) parser.add_argument("""--overwrite_output_dir""" ,action="""store_true""" ,default=lowerCamelCase__ ) parser.add_argument("""--max_tokens_per_batch""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ) parser.add_argument("""--logger_name""" ,type=lowerCamelCase__ ,choices=["""default""", """wandb""", """wandb_shared"""] ,default="""default""" ) parser.add_argument("""--n_train""" ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" ,type=lowerCamelCase__ ,default=500 ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" ,type=lowerCamelCase__ ,default="""summarization""" ,required=lowerCamelCase__ ,help="""# examples. 
-1 means use all.""" ) parser.add_argument("""--label_smoothing""" ,type=lowerCamelCase__ ,default=0.0 ,required=lowerCamelCase__ ) parser.add_argument("""--src_lang""" ,type=lowerCamelCase__ ,default="""""" ,required=lowerCamelCase__ ) parser.add_argument("""--tgt_lang""" ,type=lowerCamelCase__ ,default="""""" ,required=lowerCamelCase__ ) parser.add_argument("""--eval_beams""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,required=lowerCamelCase__ ) parser.add_argument( """--val_metric""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,required=lowerCamelCase__ ,choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" ,type=lowerCamelCase__ ,default=1 ,required=lowerCamelCase__ ,help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" ,type=lowerCamelCase__ ,default=-1 ,required=lowerCamelCase__ ,help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) ,) return parser class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[Any] = "translation" __snake_case : Tuple = ["loss"] __snake_case : Union[str, Any] = ["bleu"] __snake_case : str = "bleu" def __init__( self : str ,lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Tuple ) -> List[str]: '''simple docstring''' super().__init__(lowerCamelCase__ ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = hparams.src_lang SCREAMING_SNAKE_CASE = hparams.tgt_lang def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ) -> dict: '''simple docstring''' return calculate_bleu(lowerCamelCase__ ,lowerCamelCase__ ) def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> SummarizationModule: '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) check_output_dir(_SCREAMING_SNAKE_CASE , expected_items=3 ) if model is None: if "summarization" in args.task: SCREAMING_SNAKE_CASE = SummarizationModule(_SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE = TranslationModule(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): SCREAMING_SNAKE_CASE = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger SCREAMING_SNAKE_CASE = os.environ.get("""WANDB_PROJECT""" , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = WandbLogger(name=model.output_dir.name , project=_SCREAMING_SNAKE_CASE ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger SCREAMING_SNAKE_CASE = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" ) if args.early_stopping_patience >= 0: SCREAMING_SNAKE_CASE = 
get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = args.val_metric == """loss""" SCREAMING_SNAKE_CASE = generic_train( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , _SCREAMING_SNAKE_CASE ) , early_stopping_callback=_SCREAMING_SNAKE_CASE , logger=_SCREAMING_SNAKE_CASE , ) pickle_save(model.hparams , model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model SCREAMING_SNAKE_CASE = """""" SCREAMING_SNAKE_CASE = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_SCREAMING_SNAKE_CASE ) ) if checkpoints: SCREAMING_SNAKE_CASE = checkpoints[-1] SCREAMING_SNAKE_CASE = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() SCREAMING_SNAKE_CASE_ = pl.Trainer.add_argparse_args(parser) SCREAMING_SNAKE_CASE_ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) SCREAMING_SNAKE_CASE_ = parser.parse_args() main(args)
116
1
"""Jump search on a sorted sequence."""
import math


def jump_search(arr, x):
    """Return the index of ``x`` in ascending-sorted ``arr`` via jump search, or -1.

    Jumps forward in blocks of ~sqrt(n) until passing the block that could
    contain ``x``, then scans that block linearly. Runs in O(sqrt(n)).

    Args:
        arr: sequence sorted in ascending order.
        x: value to locate.

    Returns:
        Index of ``x`` in ``arr``, or -1 when absent (including empty input).
    """
    n = len(arr)
    if n == 0:
        return -1  # guard: the block probe below would index arr[-1] on empty input
    block_size = int(math.floor(math.sqrt(n)))
    step = block_size
    prev = 0
    # Jump block by block while the block's last element is still below x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += block_size
        if prev >= n:
            return -1
    # Linear scan inside the candidate block.
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1


# Backward-compatible alias for the original (mangled) name.
_lowerCAmelCase = jump_search


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
331
"""GPT-NeoX-Japanese model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX-Japanese model.

    Stores the hyperparameters that define the model architecture; defaults
    match the abeja/gpt-neox-japanese-2.7b checkpoint.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Intermediate size is expressed as a multiple of hidden_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Rotary embedding settings: fraction of head dims rotated, and base frequency.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
331
1
"""Look up book details on Open Library by ISBN."""
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the JSON record for an Open Library olid (e.g. "isbn/...", "authors/...").

    Raises:
        ValueError: if ``olid`` does not contain exactly one "/" after
            stripping surrounding whitespace and slashes.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Reduce a raw Open Library book record to a human-friendly summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Author entries only carry a "key"; resolve each one to its display name.
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        # Original had the tautological isinstance(value, value); list values
        # (authors, ISBNs) are flattened into comma-separated strings.
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
702
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class lowerCAmelCase ( a , unittest.TestCase ): _lowerCamelCase : str = PegasusTokenizer _lowerCamelCase : Union[str, Any] = PegasusTokenizerFast _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Optional[Any] = True def lowercase ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : List[Any] = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self ): return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def lowercase ( self , **snake_case__ ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def lowercase ( self , snake_case__ ): return ("This is a test", "This is a test") def lowercase ( self ): lowerCAmelCase : Optional[int] = '</s>' lowerCAmelCase : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def lowercase ( self ): lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '</s>' ) self.assertEqual(vocab_keys[-1] , 'v' ) self.assertEqual(len(snake_case__ ) , 1103 ) def lowercase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def lowercase ( self ): lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase : List[Any] = 
self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase : Optional[Any] = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def lowercase ( self ): lowerCAmelCase : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def lowercase ( self ): lowerCAmelCase : Optional[Any] = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.' lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowercase ( self ): lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' 
* 150, 'short example'] lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny'] lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' ) lowerCAmelCase : Dict = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def lowercase ( self ): # fmt: off lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , ) @require_sentencepiece @require_tokenizers class lowerCAmelCase ( a , unittest.TestCase ): _lowerCamelCase : Optional[Any] = PegasusTokenizer _lowerCamelCase : str = PegasusTokenizerFast _lowerCamelCase : Tuple = True _lowerCamelCase : int = True def lowercase ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : int = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self ): return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def lowercase ( self , **snake_case__ ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def lowercase ( self , snake_case__ ): return ("This is a test", "This is a test") def lowercase ( self ): lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase : List[str] = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) lowerCAmelCase : Dict = 
rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def lowercase ( self ): lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example'] lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny'] lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' ) lowerCAmelCase : List[str] = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def lowercase ( self ): lowerCAmelCase : List[str] = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
646
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = 1 for i in range(1 , num + 1 ): fact *= i return fact def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = 0 while number > 0: __lowerCAmelCase = number % 10 sum_of_digits += last_digit __lowerCAmelCase = number // 10 # Removing the last_digit from the given number return sum_of_digits def _lowerCamelCase ( _UpperCamelCase = 100 ): '''simple docstring''' __lowerCAmelCase = factorial(__lowercase ) __lowerCAmelCase = split_and_add(__lowercase ) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
636
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the supported learning-rate schedules."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Schedule with a constant learning rate, using the LR set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant LR preceded by a linear warmup over ``num_warmup_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant LR multiplier.

    ``step_rules`` is e.g. ``"1:10,0.1:20,0.01:30,0.005"``: multiplier 1 for the
    first 10 steps, 0.1 for the next 20, 0.01 for the next 30, then 0.005 forever.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # Trailing entry (no colon) is the multiplier used after the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup to the optimizer LR, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then cosine decay; ``num_cycles=0.5`` gives a half-cosine down to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: int = 1,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then ``num_cycles`` cosine cycles each restarting from the peak LR."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` makes each cycle restart at the top of the cosine.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float = 1e-7,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then polynomial decay from the initial LR down to ``lr_end``."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified factory: build any schedule by name, validating the required arguments.

    Raises:
        ValueError: if ``num_warmup_steps`` / ``num_training_steps`` is missing for a
            schedule that requires it.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
670
0
"""Distribution output layers mapping network features to torch Distributions."""

from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    """A base distribution shifted/scaled by ``y = loc + scale * x``."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales with ``scale**2``; ``loc`` does not affect it."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Projects hidden features to one raw tensor per distribution parameter."""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per parameter (e.g. loc, scale), sized by args_dim.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        # domain_map constrains each raw tensor to its parameter's valid domain.
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module`` so it composes in Sequential/ModuleList."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    """Base class converting raw projections into a concrete torch ``Distribution``."""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Each parameter needs `dim` values when the event is multivariate.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        # For multivariate events, treat the last axis as a single event.
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """Build the output distribution, optionally affine-rescaled by loc/scale."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event; scalar events have shape ``()``."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support (e.g. for padding)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Constrain raw tensors to each parameter's valid domain; subclasses implement."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth map onto the positive reals: ``(x + sqrt(x^2 + 4)) / 2``."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    """Student's t output: df > 2, scale > 0."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)  # keep df > 2 so the variance exists
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    """Normal output: scale > 0."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial output: total_count > 0, logits unconstrained."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        # Overridden because a count distribution cannot be affine-transformed;
        # scaling is instead folded into the logits.
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
113
"""Slow integration test for the Flax XLM-RoBERTa base model."""

import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        """Check output shape and a last-dim slice against reference values."""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
113
1
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest contiguous substring common to ``text1`` and ``text2``.

    Uses the classic O(len1 * len2) dynamic programme where ``dp[i][j]`` is the
    length of the longest common suffix of ``text1[:i]`` and ``text2[:j]``.

    Raises:
        ValueError: if either argument is not a string.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    len1 = len(text1)
    len2 = len(text2)

    dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    end_pos = 0  # index in text1 just past the best match
    max_len = 0

    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > max_len:
                    end_pos = i
                    max_len = dp[i][j]

    return text1[end_pos - max_len : end_pos]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
439
def solution(limit: int = 1_000_000) -> int:
    """Project Euler #14: return the start below ``limit`` with the longest Collatz chain.

    Chain lengths are memoised in ``counters`` (number -> total chain length
    including the terminal 1) so each number's tail is walked only once.
    """
    largest_number = 1
    pre_counter = 1  # best chain length found so far
    counters = {1: 1}

    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
            else:
                number = (3 * number) + 1
            counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
99
0
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Any: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> int: a_ : Optional[int] = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue a_ : Tuple = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head" ) a_ : List[str] = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head" ) a_ : str = key.replace("heads.cmd.itm_head.cls", "itm_head" ) a_ : List[Any] = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler" ) a_ : Dict = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale" ) a_ : List[Any] = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head" ) a_ : Any = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head" ) a_ : Any = key.replace("mm_text_projection", "flava.text_to_mm_projection" ) a_ : Optional[Any] = key.replace("mm_image_projection", "flava.image_to_mm_projection" ) a_ : Tuple = key.replace("image_encoder.module", "flava.image_model" ) a_ : Tuple = key.replace("text_encoder.module", "flava.text_model" ) a_ : Optional[int] = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token" ) a_ : Any = key.replace("mm_encoder.module", "flava.multimodal_model" ) a_ : int = key.replace("text_projection", "flava.text_projection" ) a_ : Union[str, Any] = key.replace("image_projection", "flava.image_projection" ) a_ : Optional[int] = value.float() for key, value in codebook_state_dict.items(): a_ : Dict = value return upgrade @torch.no_grad() def lowerCAmelCase_ ( 
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=None ) -> int: if config_path is not None: a_ : List[Any] = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: a_ : Union[str, Any] = FlavaConfig() a_ : Optional[Any] = FlavaForPreTraining(SCREAMING_SNAKE_CASE__ ).eval() a_ : Optional[Any] = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, save_checkpoint=SCREAMING_SNAKE_CASE__ ) if os.path.exists(SCREAMING_SNAKE_CASE__ ): a_ : List[str] = torch.load(SCREAMING_SNAKE_CASE__, map_location="cpu" ) else: a_ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__, map_location="cpu" ) a_ : Tuple = upgrade_state_dict(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = hf_model.state_dict() a_ : List[Any] = count_parameters(SCREAMING_SNAKE_CASE__ ) a_ : Any = count_parameters(SCREAMING_SNAKE_CASE__ ) + count_parameters(SCREAMING_SNAKE_CASE__ ) assert torch.allclose(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, atol=1e-3 ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
370
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class snake_case_ ( a_ ,unittest.TestCase ): __lowerCAmelCase = KandinskyImgaImgPipeline __lowerCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"] __lowerCAmelCase = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] __lowerCAmelCase = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __lowerCAmelCase = False @property def snake_case_ ( self ): return 3_2 @property def snake_case_ ( self ): return 3_2 @property def snake_case_ ( self ): return self.time_input_dim @property def snake_case_ ( self ): return self.time_input_dim * 4 @property def snake_case_ ( self ): return 1_0_0 @property def snake_case_ ( self ): a_ : List[str] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def snake_case_ ( self ): torch.manual_seed(0 ) a_ : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , ) a_ : str = MultilingualCLIP(a_ ) a_ : Any = text_encoder.eval() return text_encoder @property def 
snake_case_ ( self ): torch.manual_seed(0 ) a_ : Union[str, Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a_ : Dict = UNetaDConditionModel(**a_ ) return model @property def snake_case_ ( self ): return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case_ ( self ): torch.manual_seed(0 ) a_ : int = VQModel(**self.dummy_movq_kwargs ) return model def snake_case_ ( self ): a_ : Dict = self.dummy_text_encoder a_ : Dict = self.dummy_tokenizer a_ : Optional[int] = self.dummy_unet a_ : Dict = self.dummy_movq a_ : List[str] = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.00_085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } a_ : List[Any] = DDIMScheduler(**a_ ) a_ : Union[str, Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def snake_case_ ( self , a_ , a_=0 ): a_ : Optional[int] = floats_tensor((1, 
self.cross_attention_dim) , rng=random.Random(a_ ) ).to(a_ ) a_ : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a_ ) # create init_image a_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(a_ ) ).to(a_ ) a_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] a_ : int = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(a_ ).startswith("mps" ): a_ : Any = torch.manual_seed(a_ ) else: a_ : Any = torch.Generator(device=a_ ).manual_seed(a_ ) a_ : List[Any] = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def snake_case_ ( self ): a_ : Optional[Any] = "cpu" a_ : List[Any] = self.get_dummy_components() a_ : Union[str, Any] = self.pipeline_class(**a_ ) a_ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) a_ : Union[str, Any] = pipe(**self.get_dummy_inputs(a_ ) ) a_ : Any = output.images a_ : str = pipe( **self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0] a_ : List[str] = image[0, -3:, -3:, -1] a_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a_ : Optional[int] = np.array( [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): def snake_case_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( 
self ): a_ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) a_ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) a_ : Optional[Any] = "A red cartoon frog, 4k" a_ : int = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(a_ ) a_ : int = KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) a_ : List[Any] = pipeline.to(a_ ) pipeline.set_progress_bar_config(disable=a_ ) a_ : int = torch.Generator(device="cpu" ).manual_seed(0 ) a_ , a_ : Optional[int] = pipe_prior( a_ , generator=a_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a_ : List[Any] = pipeline( a_ , image=a_ , image_embeds=a_ , negative_image_embeds=a_ , generator=a_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , ) a_ : int = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(a_ , a_ )
370
1
"""simple docstring""" import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger("transformers.models.speecht5") def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ): hf_model.apply_weight_norm() A__ = checkpoint["""input_conv.weight_g"""] A__ = checkpoint["""input_conv.weight_v"""] A__ = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): A__ = checkpoint[F"""upsamples.{i}.1.weight_g"""] A__ = checkpoint[F"""upsamples.{i}.1.weight_v"""] A__ = checkpoint[F"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): A__ = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""] A__ = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""] A__ = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""] A__ = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""] A__ = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""] A__ = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""] A__ = checkpoint["""output_conv.1.weight_g"""] A__ = checkpoint["""output_conv.1.weight_v"""] A__ = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def _SCREAMING_SNAKE_CASE ( UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Tuple=None , UpperCamelCase : List[Any]=None , ): if config_path is not None: A__ = SpeechTaHifiGanConfig.from_pretrained(UpperCamelCase ) else: A__ = SpeechTaHifiGanConfig() A__ = SpeechTaHifiGan(UpperCamelCase ) A__ = torch.load(UpperCamelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] , UpperCamelCase , UpperCamelCase ) A__ = np.load(UpperCamelCase ) A__ = stats[0].reshape(-1 ) A__ = stats[1].reshape(-1 ) A__ = torch.from_numpy(UpperCamelCase ).float() A__ = 
torch.from_numpy(UpperCamelCase ).float() model.save_pretrained(UpperCamelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) lowerCamelCase__ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
574
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule lowerCamelCase__ = { "config": [ "EXTERNAL_DATA_FORMAT_SIZE_LIMIT", "OnnxConfig", "OnnxConfigWithPast", "OnnxSeq2SeqConfigWithPast", "PatchingSpec", ], "convert": ["export", "validate_model_outputs"], "features": ["FeaturesManager"], "utils": ["ParameterFormat", "compute_serialized_parameters_size"], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
574
1
"""simple docstring""" from typing import List import numpy as np def UpperCAmelCase ( A : Dict ): '''simple docstring''' _UpperCAmelCase = {key: len(A ) for key, value in gen_kwargs.items() if isinstance(A , A )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) _UpperCAmelCase = max(lists_lengths.values() , default=0 ) return max(1 , A ) def UpperCAmelCase ( A : str , A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = [] for group_idx in range(A ): _UpperCAmelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _UpperCAmelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _UpperCAmelCase = range(A , start + num_shards_to_add ) shards_indices_per_group.append(A ) return shards_indices_per_group def UpperCAmelCase ( A : Tuple , A : List[str] ): '''simple docstring''' _UpperCAmelCase = _number_of_shards_in_gen_kwargs(A ) if num_shards == 1: return [dict(A )] else: _UpperCAmelCase = _distribute_shards(num_shards=A , max_num_jobs=A ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(A , A ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(A ) ) ] def UpperCAmelCase ( A : Tuple ): '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , A ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def UpperCAmelCase ( A : Dict 
, A : List[str] ): '''simple docstring''' _UpperCAmelCase = {len(A ) for value in gen_kwargs.values() if isinstance(A , A )} _UpperCAmelCase = {} for size in list_sizes: _UpperCAmelCase = list(range(A ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _UpperCAmelCase = dict(A ) for key, value in shuffled_kwargs.items(): if isinstance(A , A ): _UpperCAmelCase = [value[i] for i in indices_per_size[len(A )]] return shuffled_kwargs
703
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names, consumed by `_LazyModule` so the
# package imports lazily.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# The modeling code requires torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; never executed at runtime.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
457
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=None , ) -> Tuple: a_ : Any = parent a_ : List[str] = 13 a_ : Dict = 7 a_ : List[str] = True a_ : str = True a_ : List[str] = True a_ : Any = True a_ : List[Any] = 99 a_ : Optional[Any] = 384 a_ : List[str] = 2 a_ : Optional[int] = 4 a_ : Optional[int] = 37 a_ : Union[str, Any] = '''gelu''' a_ : List[str] = 0.1 a_ : str = 0.1 a_ : Optional[Any] = 512 a_ : Union[str, Any] = 16 a_ : Optional[Any] = 2 a_ : Optional[int] = 0.02 
a_ : Union[str, Any] = 3 a_ : Union[str, Any] = 4 a_ : List[Any] = 128 a_ : Optional[Any] = 2 a_ : Any = 9 a_ : Optional[int] = 1 a_ : int = None def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : int = None if self.use_input_mask: a_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) a_ : Optional[int] = None if self.use_token_type_ids: a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : Optional[Any] = None a_ : Optional[Any] = None a_ : Union[str, Any] = None if self.use_labels: a_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: a_ : Tuple = TFConvBertModel(config=__SCREAMING_SNAKE_CASE ) a_ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} a_ : Optional[Any] = 
[input_ids, input_mask] a_ : Tuple = model(__SCREAMING_SNAKE_CASE ) a_ : int = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> str: a_ : List[Any] = TFConvBertForMaskedLM(config=__SCREAMING_SNAKE_CASE ) a_ : Dict = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } a_ : str = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ) -> Any: a_ : Union[str, Any] = self.num_labels a_ : List[str] = TFConvBertForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) a_ : Optional[Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } a_ : Tuple = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: a_ : Optional[Any] = self.num_choices a_ : int = TFConvBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) a_ : List[str] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, 
self.num_choices, 1) ) a_ : Tuple = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) a_ : List[Any] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) a_ : Union[str, Any] = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } a_ : Optional[int] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> str: a_ : Optional[Any] = self.num_labels a_ : Optional[int] = TFConvBertForTokenClassification(config=__SCREAMING_SNAKE_CASE ) a_ : Tuple = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } a_ : Tuple = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: a_ : List[Any] = TFConvBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) a_ : Optional[Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } a_ : Dict = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE 
( self : List[str] ) -> Any: a_ : List[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): snake_case__ = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) snake_case__ = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = False snake_case__ = False def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Optional[int] = TFConvBertModelTester(self ) a_ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Optional[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: a_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() a_ : Any = True a_ : Optional[int] = True if hasattr(__SCREAMING_SNAKE_CASE , '''use_cache''' ): a_ : Dict = True a_ : Any = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length ) a_ : List[Any] = getattr(self.model_tester , '''key_length''' , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: a_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a_ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE ) a_ : Any = len(model(__SCREAMING_SNAKE_CASE ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE , saved_model=__SCREAMING_SNAKE_CASE ) a_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '''saved_model''' , '''1''' ) a_ : Optional[int] = tf.keras.models.load_model(__SCREAMING_SNAKE_CASE ) a_ : Any = model(__SCREAMING_SNAKE_CASE ) if self.is_encoder_decoder: a_ : Optional[int] = outputs['''encoder_hidden_states'''] a_ : Dict = outputs['''encoder_attentions'''] else: a_ : Union[str, Any] = outputs['''hidden_states'''] a_ : Tuple = outputs['''attentions'''] self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) a_ : Union[str, Any] = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) 
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: a_ : Tuple = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() a_ : Optional[Any] = True a_ : Dict = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length ) a_ : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length ) a_ : Dict = getattr(self.model_tester , '''key_length''' , __SCREAMING_SNAKE_CASE ) a_ : Any = getattr(self.model_tester , '''key_length''' , __SCREAMING_SNAKE_CASE ) def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE : List[str] ): a_ : int = len(__SCREAMING_SNAKE_CASE ) self.assertEqual(out_len % 2 , 0 ) a_ : Dict = outputs.decoder_attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE : List[str] ): a_ : Optional[int] = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, 
encoder_key_length] , ) for model_class in self.all_model_classes: a_ : Tuple = True a_ : Tuple = False a_ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE ) a_ : List[Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) a_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(__SCREAMING_SNAKE_CASE ) if self.is_encoder_decoder: a_ : Any = model_class(__SCREAMING_SNAKE_CASE ) a_ : List[Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE ) check_decoder_attentions_output(__SCREAMING_SNAKE_CASE ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] a_ : Union[str, Any] = True a_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) a_ : int = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(__SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine a_ : Dict = True a_ : str = True a_ : int = model_class(__SCREAMING_SNAKE_CASE ) a_ : List[Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__SCREAMING_SNAKE_CASE ) ) self.assertEqual(model.config.output_hidden_states , __SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(__SCREAMING_SNAKE_CASE ) @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : int ) -> Any: a_ : int = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) a_ : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) a_ : Any = model(__SCREAMING_SNAKE_CASE )[0] a_ : str = [1, 6, 768] self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) a_ : 
Dict = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 )
466
0
"""simple docstring""" lowercase_ : Tuple = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } def _lowerCAmelCase ( lowerCamelCase__ : dict, lowerCamelCase__ : str, lowerCamelCase__ : List[str] ) -> List[Any]: _SCREAMING_SNAKE_CASE : Any = set() # keep track of all the paths to be checked _SCREAMING_SNAKE_CASE : Optional[Any] = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _SCREAMING_SNAKE_CASE : List[str] = queue.pop(0 ) # get the last node from the path _SCREAMING_SNAKE_CASE : List[str] = path[-1] if node not in explored: _SCREAMING_SNAKE_CASE : Dict = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _SCREAMING_SNAKE_CASE : Dict = list(lowerCamelCase__ ) new_path.append(lowerCamelCase__ ) queue.append(lowerCamelCase__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(lowerCamelCase__ ) # in case there's no path between the 2 nodes return [] def _lowerCAmelCase ( lowerCamelCase__ : dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str ) -> Optional[int]: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _SCREAMING_SNAKE_CASE : Dict = [start] _SCREAMING_SNAKE_CASE : List[Any] = set(lowerCamelCase__ ) # Keep tab on distances from `start` node. 
_SCREAMING_SNAKE_CASE : Tuple = {start: 0, target: -1} while queue: _SCREAMING_SNAKE_CASE : Optional[Any] = queue.pop(0 ) if node == target: _SCREAMING_SNAKE_CASE : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(lowerCamelCase__ ) queue.append(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : Optional[Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
718
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): A__ = """naver-clova-ix/donut-base-finetuned-docvqa""" A__ = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) A__ = """document_qa""" A__ = AutoProcessor A__ = VisionEncoderDecoderModel A__ = ["""image""", """text"""] A__ = ["""text"""] def __init__( self , *snake_case__ , **snake_case__ ): """simple docstring""" if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." 
) super().__init__(*snake_case__ , **snake_case__ ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" _SCREAMING_SNAKE_CASE : Optional[int] = task_prompt.replace("{user_input}" , snake_case__ ) _SCREAMING_SNAKE_CASE : Tuple = self.pre_processor.tokenizer( snake_case__ , add_special_tokens=snake_case__ , return_tensors="pt" ).input_ids _SCREAMING_SNAKE_CASE : Union[str, Any] = self.pre_processor(snake_case__ , return_tensors="pt" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" return self.model.generate( inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case__ , ).sequences def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.pre_processor.batch_decode(snake_case__ )[0] _SCREAMING_SNAKE_CASE : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "" ) _SCREAMING_SNAKE_CASE : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , "" ) _SCREAMING_SNAKE_CASE : Dict = re.sub(r"<.*?>" , "" , snake_case__ , count=1 ).strip() # remove first task start token _SCREAMING_SNAKE_CASE : Dict = self.pre_processor.tokenajson(snake_case__ ) return sequence["answer"]
295
0
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( 
broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, 
has_transformer_engine_layers
150
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor snake_case : Optional[int] = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCAmelCase_ ( _snake_case : int ) -> Union[str, Any]: '''simple docstring''' if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): __magic_name__ : List[str] = [image] __magic_name__ : Optional[int] = [trans(img.convert("RGB" ) ) for img in image] __magic_name__ : List[str] = torch.stack(_snake_case ) return image class _snake_case ( snake_case ): def __init__( self , _a , _a ): super().__init__() # make sure scheduler can always be converted to DDIM __magic_name__ : Optional[int] = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_a , scheduler=_a ) def SCREAMING_SNAKE_CASE ( self , _a ): if strength < 0 or strength > 1: raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ): # get the original timestep using init_timestep __magic_name__ : List[str] = min(int(num_inference_steps * strength ) , _a ) __magic_name__ : Dict = max(num_inference_steps - init_timestep , 0 ) __magic_name__ : Union[str, Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a=None ): if not isinstance(_a , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_a )}''' ) __magic_name__ : Any = image.to(device=_a , dtype=_a ) if isinstance(_a , _a ) and len(_a ) != batch_size: raise ValueError( f'''You have passed a list of generators of 
length {len(_a )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) __magic_name__ : Optional[int] = init_latents.shape __magic_name__ : Optional[Any] = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) # get latents print("add noise to latents at timestep" , _a ) __magic_name__ : List[Any] = self.scheduler.add_noise(_a , _a , _a ) __magic_name__ : int = init_latents return latents @torch.no_grad() def __call__( self , _a = None , _a = 0.8 , _a = 1 , _a = None , _a = 0.0 , _a = 50 , _a = None , _a = "pil" , _a = True , ): self.check_inputs(_a ) # 2. Preprocess image __magic_name__ : str = preprocess(_a ) # 3. set timesteps self.scheduler.set_timesteps(_a , device=self.device ) __magic_name__ , __magic_name__ : str = self.get_timesteps(_a , _a , self.device ) __magic_name__ : List[str] = timesteps[:1].repeat(_a ) # 4. Prepare latent variables __magic_name__ : Optional[int] = self.prepare_latents(_a , _a , _a , self.unet.dtype , self.device , _a ) __magic_name__ : List[Any] = latents # 5. Denoising loop for t in self.progress_bar(_a ): # 1. predict noise model_output __magic_name__ : str = self.unet(_a , _a ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __magic_name__ : str = self.scheduler.step( _a , _a , _a , eta=_a , use_clipped_model_output=_a , generator=_a , ).prev_sample __magic_name__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 ) __magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __magic_name__ : Optional[int] = self.numpy_to_pil(_a ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_a )
124
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class snake_case__ ( lowercase_): '''simple docstring''' @slow @require_torch def __lowercase ( self ) -> Any: '''simple docstring''' __snake_case :Any = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) __snake_case :Optional[int] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __snake_case :Any = bertabert.config.encoder.vocab_size __snake_case :Optional[int] = tokenizer.sep_token_id __snake_case :Dict = tokenizer.cls_token_id __snake_case :List[str] = 1_28 __snake_case :List[Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) __snake_case :Optional[int] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) __snake_case :Any = train_dataset.select(range(32 ) ) __snake_case :str = val_dataset.select(range(16 ) ) __snake_case :Union[str, Any] = 4 def _map_to_encoder_decoder_inputs(a__ ): # Tokenizer will automatically set [BOS] <text> [EOS] __snake_case :Optional[Any] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=a__ , max_length=5_12 ) __snake_case :Dict = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=a__ , max_length=1_28 ) __snake_case :Union[str, Any] = inputs.input_ids __snake_case :List[Any] = inputs.attention_mask __snake_case :str = outputs.input_ids __snake_case :Any = outputs.input_ids.copy() __snake_case :Dict = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] __snake_case :Dict = outputs.attention_mask assert all(len(a__ ) == 5_12 for x in inputs.input_ids ) assert all(len(a__ ) == 1_28 for x in outputs.input_ids ) return batch def 
_compute_metrics(a__ ): __snake_case :Optional[Any] = pred.label_ids __snake_case :Union[str, Any] = pred.predictions # all unnecessary tokens are removed __snake_case :Tuple = tokenizer.batch_decode(a__ , skip_special_tokens=a__ ) __snake_case :int = tokenizer.batch_decode(a__ , skip_special_tokens=a__ ) __snake_case :Optional[Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a__ ) )] ) / len(a__ ) return {"accuracy": accuracy} # map train dataset __snake_case :str = train_dataset.map( _map_to_encoder_decoder_inputs , batched=a__ , batch_size=a__ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset __snake_case :Dict = val_dataset.map( _map_to_encoder_decoder_inputs , batched=a__ , batch_size=a__ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) __snake_case :str = self.get_auto_remove_tmp_dir() __snake_case :Optional[int] = SeqaSeqTrainingArguments( output_dir=a__ , per_device_train_batch_size=a__ , per_device_eval_batch_size=a__ , predict_with_generate=a__ , evaluation_strategy="""steps""" , do_train=a__ , do_eval=a__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer __snake_case :Optional[Any] = SeqaSeqTrainer( model=a__ , args=a__ , compute_metrics=_compute_metrics , train_dataset=a__ , eval_dataset=a__ , tokenizer=a__ , ) # start training trainer.train()
291
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ ( lowercase_ , unittest.TestCase): '''simple docstring''' lowerCamelCase : Optional[int] = GPTSanJapaneseTokenizer lowerCamelCase : Any = False lowerCamelCase : Optional[Any] = {"do_clean_text": False, "add_prefix_space": False} def __lowercase ( self ) -> Any: '''simple docstring''' super().setUp() # fmt: off __snake_case :str = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on __snake_case :Optional[Any] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 __snake_case :Optional[int] = {"""unk_token""": """<unk>"""} __snake_case :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __snake_case :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(a__ ) ) def __lowercase ( self , **a__ ) -> Optional[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a__ ) def __lowercase ( self , a__ ) -> List[str]: '''simple docstring''' __snake_case :Tuple = """こんにちは、世界。 \nこんばんは、㔺界。😀""" __snake_case :Union[str, Any] = """こんにちは、世界。 \nこんばんは、世界。😀""" return 
input_text, output_text def __lowercase ( self , a__ ) -> List[str]: '''simple docstring''' __snake_case , __snake_case :Dict = self.get_input_output_texts(a__ ) __snake_case :Dict = tokenizer.encode(a__ , add_special_tokens=a__ ) __snake_case :int = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ ) return text, ids def __lowercase ( self ) -> Optional[int]: '''simple docstring''' pass # TODO add if relevant def __lowercase ( self ) -> Optional[Any]: '''simple docstring''' pass # TODO add if relevant def __lowercase ( self ) -> Union[str, Any]: '''simple docstring''' pass # TODO add if relevant def __lowercase ( self ) -> str: '''simple docstring''' __snake_case :Optional[int] = self.get_tokenizer() # Testing tokenization __snake_case :int = """こんにちは、世界。 こんばんは、㔺界。""" __snake_case :Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] __snake_case :Optional[Any] = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids without special tokens __snake_case :List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] __snake_case :Any = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids with special tokens __snake_case :Tuple = tokens + [tokenizer.unk_token] __snake_case :Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] __snake_case :List[Any] = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual(a__ , a__ ) def __lowercase ( self ) -> List[str]: '''simple docstring''' __snake_case :Tuple = self.get_tokenizer() # Testing tokenization __snake_case :Optional[Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" __snake_case :Tuple = """こんにちは、、、、世界。こんばんは、、、、世界。""" __snake_case :str = tokenizer.encode(a__ ) __snake_case :Optional[int] = tokenizer.decode(a__ ) self.assertEqual(a__ , a__ ) @slow def __lowercase ( self ) -> List[Any]: '''simple docstring''' __snake_case :List[str] = 
self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization __snake_case :str = """こんにちは、世界。""" __snake_case :List[str] = """こんばんは、㔺界。😀""" __snake_case :Dict = """こんにちは、世界。こんばんは、世界。😀""" __snake_case :int = tokenizer.encode(prefix_text + input_text ) __snake_case :Optional[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) __snake_case :Any = tokenizer.encode(a__ , prefix_text=a__ ) __snake_case :Optional[Any] = tokenizer.decode(a__ ) __snake_case :Optional[Any] = tokenizer.decode(a__ ) __snake_case :Optional[Any] = tokenizer.decode(a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) @slow def __lowercase ( self ) -> Any: '''simple docstring''' __snake_case :Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization __snake_case :List[Any] = """こんにちは、世界。""" __snake_case :Dict = """こんばんは、㔺界。😀""" __snake_case :Optional[int] = len(tokenizer.encode(a__ ) ) - 2 __snake_case :Union[str, Any] = len(tokenizer.encode(a__ ) ) - 2 __snake_case :Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1) __snake_case :Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] __snake_case :Union[str, Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1) __snake_case :int = tokenizer(prefix_text + input_text ).token_type_ids __snake_case :List[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids __snake_case :int = tokenizer(a__ , prefix_text=a__ ).token_type_ids self.assertListEqual(a__ , a__ ) self.assertListEqual(a__ , a__ ) self.assertListEqual(a__ , a__ ) @slow def __lowercase ( self ) -> Optional[int]: '''simple docstring''' __snake_case :Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) __snake_case :Tuple = tokenizer.encode("""あンいワ""" ) __snake_case :List[str] = tokenizer.encode("""""" , prefix_text="""あンいワ""" ) __snake_case :int = tokenizer.encode("""いワ""" , 
prefix_text="""あン""" ) self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) ) self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) ) self.assertNotEqual(a__ , a__ ) self.assertNotEqual(a__ , a__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def __lowercase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case :List[str] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) __snake_case :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] __snake_case :Optional[int] = tokenizer(a__ , padding=a__ ) __snake_case :int = tokenizer.batch_encode_plus(a__ , padding=a__ ) # fmt: off __snake_case :Tuple = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] __snake_case :Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] __snake_case :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , a__ ) self.assertListEqual(x_token.token_type_ids , a__ ) self.assertListEqual(x_token.attention_mask , a__ ) self.assertListEqual(x_token_a.input_ids , a__ ) self.assertListEqual(x_token_a.token_type_ids , a__ ) self.assertListEqual(x_token_a.attention_mask , a__ ) def __lowercase ( self ) -> List[str]: '''simple docstring''' pass def __lowercase ( self ) -> Optional[int]: '''simple docstring''' pass
291
1
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig lowercase_ = { """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""", """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""", } class __UpperCamelCase ( a_ ): """simple docstring""" lowerCAmelCase_ = """ernie_m""" lowerCAmelCase_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self : List[str] , _A : int = 25_0002 , _A : int = 768 , _A : int = 12 , _A : int = 12 , _A : int = 3072 , _A : str = "gelu" , _A : float = 0.1 , _A : float = 0.1 , _A : int = 514 , _A : float = 0.02 , _A : int = 1 , _A : float = 1e-05 , _A : Dict=None , _A : List[str]=False , _A : Optional[Any]=0.0 , **_A : List[Any] , ): """simple docstring""" super().__init__(pad_token_id=_lowercase , **_lowercase ) __SCREAMING_SNAKE_CASE : Optional[int] = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = intermediate_size __SCREAMING_SNAKE_CASE : Dict = hidden_act __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Tuple = classifier_dropout __SCREAMING_SNAKE_CASE : int = is_decoder __SCREAMING_SNAKE_CASE : List[str] = act_dropout
74
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Any ) -> Any: # Initialise PyTorch model lowerCAmelCase__ : Any = MobileBertConfig.from_json_file(lowerCamelCase ) print(F"Building PyTorch model from configuration: {config}" ) lowerCAmelCase__ : str = MobileBertForPreTraining(lowerCamelCase ) # Load weights from tf checkpoint lowerCAmelCase__ : Union[str, Any] = load_tf_weights_in_mobilebert(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , lowerCamelCase ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __UpperCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
308
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowercase__ :Dict = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ :Union[str, Any] = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ :Any = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys lowercase__ :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
633
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase , lowercase = emb.weight.shape lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ ) lowercase = emb.weight.data return lin_layer def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' ) lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] lowercase = mam_aaa['''model'''] remove_ignore_keys_(lowerCAmelCase__ ) lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase = MaMaaaConfig( vocab_size=lowerCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) lowercase = state_dict['''decoder.embed_tokens.weight'''] lowercase = MaMaaaForConditionalGeneration(lowerCAmelCase__ ) model.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) lowercase = make_linear_from_emb(model.model.shared ) return model if __name__ 
== "__main__": lowercase__ :Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") lowercase__ :Tuple = parser.parse_args() lowercase__ :int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
633
1
"""simple docstring""" import socket def a ( ): '''simple docstring''' UpperCAmelCase_ :Union[str, Any] = socket.socket(socket.AF_INET, socket.SOCK_STREAM ) UpperCAmelCase_ :int = socket.gethostname() UpperCAmelCase_ :List[Any] = 12312 sock.connect((host, port) ) sock.send(b'''Hello server!''' ) with open('''Received_file''', '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: UpperCAmelCase_ :int = sock.recv(1024 ) if not data: break out_file.write(__snake_case ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
608
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _snake_case ( A__ ): '''simple docstring''' def __init__( self : Optional[int] ): UpperCAmelCase_ :int = [] def snake_case_ ( self : Tuple , snake_case : Dict , snake_case : Optional[int] , snake_case : Union[str, Any] , **snake_case : Optional[Any] ): self.events.append('''on_init_end''' ) def snake_case_ ( self : Optional[Any] , snake_case : List[Any] , snake_case : int , snake_case : int , **snake_case : Dict ): self.events.append('''on_train_begin''' ) def snake_case_ ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , **snake_case : Any ): self.events.append('''on_train_end''' ) def snake_case_ ( self : Tuple , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Union[str, Any] , **snake_case : Optional[int] ): self.events.append('''on_epoch_begin''' ) def snake_case_ ( self : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , **snake_case : Optional[int] ): self.events.append('''on_epoch_end''' ) def snake_case_ ( self : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Union[str, Any] , **snake_case : Optional[int] ): self.events.append('''on_step_begin''' ) def snake_case_ ( self : int , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , **snake_case : str ): self.events.append('''on_step_end''' ) def snake_case_ ( self : Union[str, Any] , snake_case : List[str] , snake_case : Any , snake_case : List[Any] 
, **snake_case : str ): self.events.append('''on_evaluate''' ) def snake_case_ ( self : List[Any] , snake_case : str , snake_case : List[str] , snake_case : List[Any] , **snake_case : Dict ): self.events.append('''on_predict''' ) def snake_case_ ( self : Optional[int] , snake_case : Any , snake_case : int , snake_case : Tuple , **snake_case : int ): self.events.append('''on_save''' ) def snake_case_ ( self : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Any , **snake_case : Tuple ): self.events.append('''on_log''' ) def snake_case_ ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : List[str] , **snake_case : Optional[Any] ): self.events.append('''on_prediction_step''' ) @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self : Optional[Any] ): UpperCAmelCase_ :Any = tempfile.mkdtemp() def snake_case_ ( self : Optional[Any] ): shutil.rmtree(self.output_dir ) def snake_case_ ( self : Optional[Any] , snake_case : Tuple=0 , snake_case : Union[str, Any]=0 , snake_case : Optional[int]=64 , snake_case : Dict=64 , snake_case : Optional[Any]=None , snake_case : List[Any]=False , **snake_case : str ): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
UpperCAmelCase_ :str = RegressionDataset(length=snake_case ) UpperCAmelCase_ :Optional[Any] = RegressionDataset(length=snake_case ) UpperCAmelCase_ :List[Any] = RegressionModelConfig(a=snake_case , b=snake_case ) UpperCAmelCase_ :str = RegressionPreTrainedModel(snake_case ) UpperCAmelCase_ :Optional[int] = TrainingArguments(self.output_dir , disable_tqdm=snake_case , report_to=[] , **snake_case ) return Trainer( snake_case , snake_case , train_dataset=snake_case , eval_dataset=snake_case , callbacks=snake_case , ) def snake_case_ ( self : str , snake_case : Tuple , snake_case : List[Any] ): self.assertEqual(len(snake_case ) , len(snake_case ) ) # Order doesn't matter UpperCAmelCase_ :Dict = sorted(snake_case , key=lambda snake_case : cb.__name__ if isinstance(snake_case , snake_case ) else cb.__class__.__name__ ) UpperCAmelCase_ :Union[str, Any] = sorted(snake_case , key=lambda snake_case : cb.__name__ if isinstance(snake_case , snake_case ) else cb.__class__.__name__ ) for cba, cba in zip(snake_case , snake_case ): if isinstance(snake_case , snake_case ) and isinstance(snake_case , snake_case ): self.assertEqual(snake_case , snake_case ) elif isinstance(snake_case , snake_case ) and not isinstance(snake_case , snake_case ): self.assertEqual(snake_case , cba.__class__ ) elif not isinstance(snake_case , snake_case ) and isinstance(snake_case , snake_case ): self.assertEqual(cba.__class__ , snake_case ) else: self.assertEqual(snake_case , snake_case ) def snake_case_ ( self : Any , snake_case : Dict ): UpperCAmelCase_ :List[Any] = ['''on_init_end''', '''on_train_begin'''] UpperCAmelCase_ :Dict = 0 UpperCAmelCase_ :Tuple = len(trainer.get_eval_dataloader() ) UpperCAmelCase_ :Dict = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(snake_case ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if 
step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def snake_case_ ( self : int ): UpperCAmelCase_ :Dict = self.get_trainer() UpperCAmelCase_ :int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) # Callbacks passed at init are added to the default callbacks UpperCAmelCase_ :Any = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback UpperCAmelCase_ :Union[str, Any] = self.get_trainer(disable_tqdm=snake_case ) UpperCAmelCase_ :Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) def snake_case_ ( self : List[str] ): UpperCAmelCase_ :List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] UpperCAmelCase_ :int = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(snake_case ) expected_callbacks.remove(snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) UpperCAmelCase_ :Optional[int] = self.get_trainer() UpperCAmelCase_ :Dict = trainer.pop_callback(snake_case ) self.assertEqual(cb.__class__ , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) trainer.add_callback(snake_case ) expected_callbacks.insert(0 , snake_case ) 
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) # We can also add, pop, or remove by instance UpperCAmelCase_ :List[Any] = self.get_trainer() UpperCAmelCase_ :Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(snake_case ) expected_callbacks.remove(snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) UpperCAmelCase_ :Dict = self.get_trainer() UpperCAmelCase_ :int = trainer.callback_handler.callbacks[0] UpperCAmelCase_ :List[str] = trainer.pop_callback(snake_case ) self.assertEqual(snake_case , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) trainer.add_callback(snake_case ) expected_callbacks.insert(0 , snake_case ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case ) def snake_case_ ( self : Tuple ): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' , category=snake_case ) UpperCAmelCase_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() UpperCAmelCase_ :int = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) # Independent log/save/eval UpperCAmelCase_ :Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() UpperCAmelCase_ :Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) UpperCAmelCase_ :Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() UpperCAmelCase_ :List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) UpperCAmelCase_ :Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' ) 
trainer.train() UpperCAmelCase_ :List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) UpperCAmelCase_ :Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' ) trainer.train() UpperCAmelCase_ :Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) # A bit of everything UpperCAmelCase_ :List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , ) trainer.train() UpperCAmelCase_ :Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case , self.get_expected_events(snake_case ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: UpperCAmelCase_ :str = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(snake_case ) in warn_mock.call_args[0][0]
608
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the corrupted source bound the logger and this archive map to the
# same module-level name (the second assignment shadowed the first) and made the
# config class inherit from the dict; names restored to the upstream convention.
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """Configuration for a BEiT model.

    Every constructor argument is stored verbatim as an instance attribute
    (the corrupted original assigned each value to a throwaway local, so the
    config stored nothing — that is the defect fixed here).  Extra keyword
    arguments are forwarded to :class:`PretrainedConfig`.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        # Mutable list defaults kept to preserve the original interface; they
        # are only read, never mutated, by this class.
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with symbolic batch/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-4
702
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class a_ ( a__ ): """simple docstring""" def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : Optional[int] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCamelCase , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(_lowerCamelCase , '''depth_multiplier''' ) ) class a_ : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=0.2_5 , _lowerCamelCase=8 , _lowerCamelCase=True , _lowerCamelCase=1024 , _lowerCamelCase=32 , _lowerCamelCase="relu6" , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=None , ) ->List[Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : Tuple = batch_size SCREAMING_SNAKE_CASE : str = num_channels SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier SCREAMING_SNAKE_CASE : Optional[Any] = min_depth SCREAMING_SNAKE_CASE : Union[str, Any] = tf_padding SCREAMING_SNAKE_CASE : Optional[Any] = int(last_hidden_size * depth_multiplier ) SCREAMING_SNAKE_CASE : Any = output_stride SCREAMING_SNAKE_CASE : List[Any] = hidden_act 
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout_prob SCREAMING_SNAKE_CASE : int = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Any = num_labels SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : Dict = scope def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) SCREAMING_SNAKE_CASE : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __lowerCAmelCase ( self ) ->Any: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int: SCREAMING_SNAKE_CASE : str = MobileNetVaModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]: SCREAMING_SNAKE_CASE : str = self.num_labels SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , labels=_lowerCamelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a_ ( a__ , a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Union[str, Any] = ( {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Optional[int] = False def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : int = MobileNetVaModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) ->List[str]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def __lowerCAmelCase ( self ) ->str: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def __lowerCAmelCase ( self ) ->Tuple: pass def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an 
OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Union[str, Any]: def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Any = outputs.hidden_states SCREAMING_SNAKE_CASE : Tuple = 26 self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def __lowerCAmelCase ( self ) ->List[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Union[str, Any] = MobileNetVaModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return 
image @require_torch @require_vision class a_ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowerCAmelCase ( self ) ->Any: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : Dict = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor SCREAMING_SNAKE_CASE : Any = prepare_img() SCREAMING_SNAKE_CASE : str = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
333
0
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar _lowerCAmelCase = TypeVar("""T""") _lowerCAmelCase = TypeVar("""U""") class __UpperCamelCase ( Generic[T, U] ): def __init__( self ,_A ,_A ): '''simple docstring''' _lowerCAmelCase : List[Any] = key _lowerCAmelCase : Tuple = val _lowerCAmelCase : DoubleLinkedListNode[T, U] | None = None _lowerCAmelCase : DoubleLinkedListNode[T, U] | None = None def __repr__( self ): '''simple docstring''' return ( F"""Node: key: {self.key}, val: {self.val}, """ F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}""" ) class __UpperCamelCase ( Generic[T, U] ): def __init__( self ): '''simple docstring''' _lowerCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_A ,_A ) _lowerCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(_A ,_A ) _lowerCAmelCase : Optional[Any] = self.rear, self.head def __repr__( self ): '''simple docstring''' _lowerCAmelCase : str = ["DoubleLinkedList"] _lowerCAmelCase : Optional[int] = self.head while node.next is not None: rep.append(str(_A ) ) _lowerCAmelCase : Optional[int] = node.next rep.append(str(self.rear ) ) return ",\n ".join(_A ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : Dict = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _lowerCAmelCase : Optional[int] = node _lowerCAmelCase : Optional[Any] = previous _lowerCAmelCase : List[str] = node _lowerCAmelCase : Any = self.rear def __lowerCamelCase ( self ,_A ): '''simple docstring''' if node.prev is None or node.next is None: return None _lowerCAmelCase : Dict = node.next _lowerCAmelCase : List[Any] = node.prev _lowerCAmelCase : List[Any] = None _lowerCAmelCase : Optional[Any] = None return node class __UpperCamelCase ( Generic[T, U] ): _UpperCAmelCase = {} def __init__( self ,_A ): '''simple docstring''' _lowerCAmelCase : 
DoubleLinkedList[T, U] = DoubleLinkedList() _lowerCAmelCase : Optional[int] = capacity _lowerCAmelCase : List[str] = 0 _lowerCAmelCase : Dict = 0 _lowerCAmelCase : List[Any] = 0 _lowerCAmelCase : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ): '''simple docstring''' return ( F"""CacheInfo(hits={self.hits}, misses={self.miss}, """ F"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self ,_A ): '''simple docstring''' return key in self.cache def __lowerCamelCase ( self ,_A ): '''simple docstring''' if key in self.cache: self.hits += 1 _lowerCAmelCase : DoubleLinkedListNode[T, U] = self.cache[key] _lowerCAmelCase : Any = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(_A ) return node.val self.miss += 1 return None def __lowerCamelCase ( self ,_A ,_A ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _lowerCAmelCase : Dict = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(_A ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _lowerCAmelCase : Optional[Any] = DoubleLinkedListNode(_A ,_A ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _lowerCAmelCase : Optional[int] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _lowerCAmelCase : Union[str, Any] = value self.list.add(_A ) @classmethod def __lowerCamelCase ( cls ,_A = 128 ): '''simple docstring''' def cache_decorator_inner(_A ) -> Callable[..., U]: def cache_decorator_wrapper(*_A ) -> U: if func not in cls.decorator_function_to_instance_map: 
_lowerCAmelCase : str = LRUCache(_A ) _lowerCAmelCase : Dict = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _lowerCAmelCase : Tuple = func(*_A ) cls.decorator_function_to_instance_map[func].put(args[0] ,_A ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(_A ,'cache_info' ,_A ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
259
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=3 , snake_case=3_2 , snake_case=3 , snake_case=1_0 , snake_case=[1_0, 2_0, 3_0, 4_0] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Dict = parent UpperCAmelCase : int = batch_size UpperCAmelCase : Union[str, Any] = image_size UpperCAmelCase : Union[str, Any] = num_channels UpperCAmelCase : List[str] = embeddings_size UpperCAmelCase : Any = hidden_sizes UpperCAmelCase : int = depths UpperCAmelCase : List[str] = is_training UpperCAmelCase : List[str] = use_labels UpperCAmelCase : int = hidden_act UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : str = scope UpperCAmelCase : str = len(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = None if self.use_labels: UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def 
A_ ( self ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = TFResNetModel(config=snake_case ) UpperCAmelCase : int = model(snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : List[Any] = TFResNetForImageClassification(snake_case ) UpperCAmelCase : Union[str, Any] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Optional[int] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = TFResNetModelTester(self ) 
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def A_ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self ): '''simple docstring''' return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(snake_case ) UpperCAmelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : List[str] = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' def check_hidden_states_output(snake_case , snake_case , snake_case ): UpperCAmelCase : Optional[Any] = model_class(snake_case ) UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase : List[str] = self.model_tester.num_stages self.assertEqual(len(snake_case ) , 
expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[int] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase : str = layer_type UpperCAmelCase : Optional[Any] = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : str = True check_hidden_states_output(snake_case , snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase : Union[str, Any] = self.default_image_processor UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" ) # forward pass 
UpperCAmelCase : Any = model(**snake_case ) # verify the logits UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
679
0
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @require_torch def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline( task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" ) __UpperCAmelCase : List[str] = load_dataset("""ashraq/esc50""" ) __UpperCAmelCase : Tuple = dataset["""train"""]["""audio"""][-1]["""array"""] __UpperCAmelCase : str = audio_classifier(UpperCamelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(UpperCamelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , ) @unittest.skip("""No models are available in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = pipeline( task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , ) # This is an audio of a dog __UpperCAmelCase : Any = load_dataset("""ashraq/esc50""" ) __UpperCAmelCase : Optional[int] = dataset["""train"""]["""audio"""][-1]["""array"""] __UpperCAmelCase : Dict = audio_classifier(UpperCamelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(UpperCamelCase ) , [ {"""score""": 0.999, """label""": """Sound of a dog"""}, {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""}, ] , ) __UpperCAmelCase : Tuple = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(UpperCamelCase 
) , [ [ {"""score""": 0.999, """label""": """Sound of a dog"""}, {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""}, ], ] * 5 , ) __UpperCAmelCase : str = audio_classifier( [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 ) self.assertEqual( nested_simplify(UpperCamelCase ) , [ [ {"""score""": 0.999, """label""": """Sound of a dog"""}, {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""}, ], ] * 5 , ) @unittest.skip("""No models are available in TF""" ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' pass
720
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 3 , _UpperCamelCase : int = 7 , _UpperCamelCase : int = 1_0_0_0_0_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Optional[int] = 1 for current_denominator in range(1 , limit + 1 ): __UpperCAmelCase : List[str] = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: __UpperCAmelCase : Union[str, Any] = current_numerator __UpperCAmelCase : List[Any] = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=100_0000))
299
0
"""Tests for `datasets.utils.file_utils`: cached_path extraction, local paths, and offline mode."""
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session-scoped zstd-compressed copy of FILE_CONTENT."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """A file written inside the mock `tmpfs` filesystem, returned as its in-fs path."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    # cached_path with extract_compressed_file=True must yield the decompressed content.
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    # Check the extraction directory layout for every cache-dir / extracted-dir configuration.
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
575
"""Lazy-import init for the BARTpho tokenizer (requires sentencepiece)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Map of submodule -> public names, filled in only when the backend is installed.
# (The original referenced `_import_structure` without ever defining it -> NameError at import.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
575
1
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
#   python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom
# addr:port, or the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d.
# Use torch.distributed.launch instead of torch.distributed.run for torch < 1.9.
#
# If `barrier` calls hang you have network issues; try NCCL_DEBUG=INFO to see what is going on
# behind the scenes. The script can also be launched under SLURM via `srun`, e.g. 2 nodes x 4 gpus:
#
#   GPUS_PER_NODE=4
#   MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
#   MASTER_PORT=6000
#   srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#     --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#     --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#     torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Print `msgs` while holding an exclusive flock so concurrent ranks don't interleave output."""
    # Bug fix: the original opened its own argument tuple instead of __file__, and was
    # defined under a different name than its call sites used.
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
593
"""Lazy-import init for the Speech-Encoder-Decoder models (PyTorch and Flax backends)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Map of submodule -> public names; backend-specific entries are added below.
# (The original clobbered the dict with lists and then referenced an undefined
# `_import_structure` -> NameError at import.)
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
593
1
"""Convert a fairseq UniSpeechSat checkpoint into the Hugging Face Transformers format."""
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name (or prefix) -> HF dotted parameter path; "*" stands for the layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
# HF attributes that live at the top level of the model rather than under `unispeech_sat.`.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF submodule reached by walking the dotted `key`, checking shapes first.

    Raises ValueError if the target tensor's shape does not match `value`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Port every fairseq state-dict entry into `hf_model`, warning about anything unmapped."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv / layer-norm tensor, validating its shape first."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                # bug fix: the original message indexed `feature_extractor[layer_id]`,
                # which is not subscriptable, so the error path itself raised TypeError.
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the Transformers design and save it.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): dict_path is unconditionally overwritten here, as in the original —
    # the `--dict_path` CLI argument is effectively ignored; confirm before changing.
    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
57
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    # Bug fix: the original inherited an undefined name instead of the imported BaseOutput,
    # and declared both fields under the same identifier (the second shadowed the first).
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or a NumPy array of shape
            `(batch_size, height, width, num_channels)`.
        nsfw_content_detected (`Optional[List[bool]]`):
            Flags denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, or `None` if safety checking was skipped.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
57
1
"""PyTorch-Lightning fine-tuning script for GLUE sequence-classification tasks."""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    # Bug fix: all methods in the original shared one name (only the last survived class
    # creation) and the class itself was referenced under a different name in main().
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # token_type_ids only exist for these architectures.
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Cache GLUE features on disk for the train and dev splits."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Build a DataLoader over the cached features for `mode` ('test' maps to 'dev')."""
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        """Aggregate per-batch validation outputs into loss + GLUE metrics."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
150
"""Convert a DPT-hybrid checkpoint (MiDaS / ADE20k) into the Hugging Face Transformers format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Derive a DPTConfig and the expected output shape from the checkpoint URL."""
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # Bug fix: the original tested `"nyu" or "midas" in checkpoint_url`, which is always
    # truthy ("nyu" is a non-empty string) and therefore applied this branch to every URL.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop the original classification head weights, which are not converted."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Translate one original DPT state-dict key into the Transformers naming scheme."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """
    Copy/paste/tweak the original DPT weights into the Transformers design, verify on an image,
    and optionally save locally and/or push to the hub.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
150
1
"""CUAD metric: wraps the official CUAD v1 scoring script for `datasets`."""

import datasets

from .evaluate import evaluate

_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """CUAD metric class.

    NOTE: the obfuscated original defined BOTH methods as `a__`, so the second
    definition clobbered the first and `datasets.Metric` could never find
    `_info`/`_compute`. Restored the method names required by the base class.
    """

    def _info(self):
        # Declares the expected feature schema for predictions/references.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Re-shape inputs into the CUAD SQuAD-like format and delegate to the
        official `evaluate` script.

        Returns a dict with EM, F1, AUPR, prec@80%recall, prec@90%recall.
        """
        # Map each question id to its list of candidate answer strings.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # The official script expects a SQuAD-style nested dataset structure.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
210
"""Utilities to introspect HF test modules: map test files to test classes,
model classes, and their `ModelTester` helpers.

NOTE: the obfuscated original defined every function as `lowerCamelCase__`
while the bodies call them under their real names (`get_module_path`,
`get_test_module`, ...), which raised NameError at runtime; the call-site
names are restored here.
"""

import importlib
import os
import sys

# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Turn a test file path like `tests/models/bert/test_modeling_bert.py`
    into an importable dotted module path.

    Raises ValueError if the path is not a `tests/models/test_modeling_*.py` file.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module object for `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all `*ModelTester` classes defined in `test_file`, sorted by name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in `test_file` that declare a non-empty
    `all_model_classes`, sorted by name."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of all model classes exercised by the test classes
    in `test_file`, sorted by name."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class` and return the class of its `model_tester`
    attribute (or None if absent)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the ModelTester classes used by the test classes covering
    `model_class` in `test_file`, sorted by name."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its ModelTester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the ModelTester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes to their names so a mapping is JSON-serializable.

    Strings pass through; classes become their `__name__`; lists/tuples/dicts
    are converted element-wise; anything else is returned unchanged.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
210
1
"""Convert a sentence into PascalCase / camelCase / snake_case / kebab-case.

NOTE: the obfuscated original named all eight functions `lowerCamelCase_` (each
redefinition shadowed the previous one) while the bodies call `split_input`,
`to_simple_case` and `to_complex_case`; the names implied by the call sites
are restored here.
"""

import re


def split_input(str_: str) -> list:
    """Split `str_` on punctuation, then split each fragment into words.

    Returns a list of word-lists, one per punctuation-delimited fragment.
    """
    # The character class deliberately includes spaces/digits/letters, so the
    # split points are punctuation characters only.
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Concatenate every word capitalized, with no separator (PascalCase)."""
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of `text` with `separator`, fully upper- or lower-cased.

    Returns "not valid string" for input with no words (IndexError path kept
    from the original).
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """PascalCase: every word capitalized, no separator."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with the first character lower-cased."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (or SCREAMING_SNAKE_CASE when `upper` is True)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (or KEBAB-CASE when `upper` is True)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
721
# Public re-exports for the data processors package.
# FIX: the mangled source imported `SquadVaProcessor` twice (digits were
# stripped from the names); the upstream names are SquadV1Processor and
# SquadV2Processor.
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
648
0
"""Project Euler problem 22: total of the alphabetical-value scores of a
sorted list of names, each weighted by its 1-based position.

FIX: the obfuscated original assigned every value to throwaway `snake_case_`
names while reading `names`/`name_score`/`total_score` (NameError), and
called `os.path.dirname(_A)` on an undefined `_A`. Also generalized with a
backward-compatible `filename` parameter (default unchanged).
"""

import os


def solution(filename: str = "p022_names.txt") -> int:
    """Return the sum over all names of (1-based rank after sorting) * (sum of
    letter values, A=1..Z=26).

    The names file lives next to this script and holds one line of
    comma-separated, double-quoted names.
    """
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)) as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # Letter value: ord('A') == 65, so subtracting 64 maps A->1 .. Z->26.
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
60
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
#
# NOTE: the obfuscated original named BOTH classes `UpperCAmelCase__` (the
# scheduler shadowed the output dataclass, so the `DDIMSchedulerOutput(...)`
# call in `step` raised NameError) and defined `betas_for_alpha_bar` under a
# different mangled name than its call site. Names implied by the call sites
# are restored here; the numeric logic is unchanged.
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of a scheduler `step` call.

    Attributes:
        prev_sample: the sample for the next timestep in the (inverted) chain.
        pred_original_sample: the model's estimate of x_0, useful for previews.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Build a beta schedule from a cumulative-alpha function.

    Each beta is `1 - alpha_bar(t2)/alpha_bar(t1)` capped at `max_beta`.
    `alpha_transform_type` selects the "cosine" or "exp" alpha_bar shape.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        # typo "tranform" kept: it is a runtime string from the original
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverted DDIM sampler: steps *forward* in noise level (t -> t + Δ),
    used e.g. for DDIM inversion of real images."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        # Backward-compat shim for the renamed `set_alpha_to_one` argument.
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op scaling: DDIM does not rescale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Precompute the (ascending) timestep grid used by `step`.

        Raises ValueError if more inference steps than training steps are requested.
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        """One inverted DDIM step: move `sample` from noise level t to t+Δ."""
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
493
0
"""Hill cipher over the 36-symbol alphabet A-Z0-9.

NOTE: the obfuscated original assigned every class attribute to
`_SCREAMING_SNAKE_CASE` and every method to `UpperCamelCase__`, while the
bodies read `self.key_string`, `self.modulus`, `self.replace_letters`, etc.,
and `main()` calls `HillCipher` / `greatest_common_divisor` — those call-site
names are restored here. The broken vectorized lambda
(`lambda __lowercase: x % 36`) is fixed to `lambda x: x % 36`.
"""

import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean gcd (recursive)."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    # 36-symbol alphabet: letters then digits.
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        """
        encrypt_key is an NxN numpy array of ints; its determinant must be
        coprime with 36 for the cipher to be invertible.

        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        """
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map an alphabet symbol to its index 0..35."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) index back to its alphabet symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless gcd(det(key) mod 36, 36) == 1."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop symbols outside the alphabet, and pad with the last
        kept character to a multiple of the key size."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block with the key matrix (mod 36).

        >>> HillCipher(numpy.array([[2, 5], [1, 6]])).encrypt('testing hill cipher')
        'WHXYJOLM9C6XT085LL'
        """
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        """Return the modular inverse of the key matrix (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Brute-force the modular inverse of the determinant.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # adj(K) = det(K) * inv(K); multiply by det^-1 mod 36.
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` block-by-block with the inverse key (mod 36).

        >>> HillCipher(numpy.array([[2, 5], [1, 6]])).decrypt('WHXYJOLM9C6XT085LL')
        'TESTINGHILLCIPHERR'
        """
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    """Interactive driver: read a key matrix and encrypt/decrypt user text."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
719
"""Fast (tokenizers-backed) tokenizer for BLOOM.

NOTE: the obfuscated original assigned the class attributes and all four
methods to the same mangled names (`_SCREAMING_SNAKE_CASE` / `UpperCamelCase__`),
so only the last definition of each survived and the base-class hooks
(`vocab_files_names`, `_batch_encode_plus`, `save_vocabulary`, ...) were never
defined. The names required by the `PreTrainedTokenizerFast` contract are
restored here; runtime strings are unchanged.
"""

import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    # Hooks read by PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    # No slow-tokenizer counterpart for BLOOM.
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pretokenized input is only consistent when a prefix space is added.
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, one EOS after each turn, truncated
        from the left to `model_max_length`."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
365
0
"""Bitwise XOR of two non-negative integers via their binary representations.

FIX: the obfuscated original declared `def A(__lowerCamelCase, __lowerCamelCase)`
— duplicate parameter names are a SyntaxError — while the body reads `a` and
`b`; the working signature is restored.
"""


def binary_xor(a: int, b: int) -> str:
    """Return the XOR of `a` and `b` as a binary string prefixed with "0b".

    Raises ValueError if either input is negative. The result is zero-padded
    to the width of the wider operand (e.g. binary_xor(25, 32) == '0b111001').
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    # Pad both operands to the same width, then XOR digit by digit.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
'''simple docstring'''
# Package initializer for the ControlNet pipelines: exposes the PyTorch
# pipelines only when both `transformers` and `torch` are installed, and the
# Flax pipeline only when `transformers` and `flax` are installed. When the
# PyTorch dependencies are missing, dummy placeholder objects are exported
# instead so that attribute access raises a helpful error.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

try:
    # Probe the optional torch/transformers dependencies up front.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    # Real PyTorch ControlNet pipelines.
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# The Flax pipeline has its own availability gate (no dummy fallback here).
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
507
0
"""Tokenizer tests for Speech2Text (SpeechaTextTokenizer).

First class exercises the generic sentencepiece-backed tokenizer behaviour
against a small fixture vocab; second class checks language-code handling of
the multilingual checkpoint.
"""
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

# Language-code token ids of the multilingual checkpoint tested below.
FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Build a small vocab from the fixture sentencepiece model and save a
        tokenizer into ``self.tmpdirname`` for the mixin tests to load."""
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<pad>` maps to id 1 and back."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            # Out-of-vocab pieces ("9", "é") round-trip to "<unk>".
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )

    @slow
    def test_tokenizer_integration(self):
        # Reference encodings for the pinned checkpoint revision.  Sequences are
        # padded to length 178 with pad id 1; attention masks mark real tokens.
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2],  # noqa: E501
                [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2] + [1] * 105,  # noqa: E501
                [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2] + [1] * 154,  # noqa: E501
            ],
            "attention_mask": [
                [1] * 178,
                [1] * 73 + [0] * 105,
                [1] * 24 + [0] * 154,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )


@require_sentencepiece
class SpeechToTextTokenizerMultilingualTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        # Load the (remote) multilingual tokenizer once for the whole class.
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        """Decoding with skip_special_tokens must drop the language-code id."""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        """Encoding prefixes the target-language code and appends EOS."""
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
118
"""Raise a base to an integer power with simple recursion."""


def power(base: int, exponent: int) -> float:
    """Return ``base`` raised to ``exponent`` using recursion.

    Only handles non-negative exponents; the caller inverts the result
    for negative ones.  Recursion depth is ``exponent``, so very large
    exponents will hit Python's recursion limit.
    """
    # Base case: anything to the power 0 is 1.
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
118
1