Dataset schema:

- code: string (lengths 87 to 55.2k)
- code_codestyle: int64 (0 to 349)
- style_context: string (lengths 135 to 49.1k)
- style_context_codestyle: int64 (0 to 349)
- label: int64 (0 to 1)
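For orientation, a minimal sketch of how rows with this schema can be inspected with the Hugging Face `datasets` library. The dataset path "user/code-style-pairs" is a hypothetical placeholder, not the real identifier.

from datasets import load_dataset

# Hypothetical placeholder path; substitute the actual dataset identifier.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the flattened source file

Row 1, code: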
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class A__ ( __snake_case ): def __init__( self , *A_ , A_=None , A_=None , **A_ ): '''simple docstring''' super().__init__(*A_ , **A_ ) UpperCamelCase : str = eval_examples UpperCamelCase : int = post_process_function def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_ = "eval" ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : str = self.get_eval_dataloader(A_ ) UpperCamelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Dict = self.compute_metrics UpperCamelCase : Dict = None UpperCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : List[str] = time.time() try: UpperCamelCase : Optional[int] = eval_loop( A_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , ) finally: UpperCamelCase : Optional[Any] = compute_metrics UpperCamelCase : Tuple = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Tuple = self.post_process_function(A_ , A_ , output.predictions ) UpperCamelCase : Tuple = self.compute_metrics(A_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCamelCase : Tuple = metrics.pop(A_ ) metrics.update(output.metrics ) else: UpperCamelCase : Union[str, Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_ ) return metrics def __UpperCamelCase( self , A_ , A_ , A_=None , A_ = "test" ): '''simple docstring''' UpperCamelCase : Any = self.get_test_dataloader(A_ ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Tuple = time.time() try: UpperCamelCase : List[Any] = eval_loop( A_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , ) finally: UpperCamelCase : List[str] = compute_metrics UpperCamelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : List[str] = self.post_process_function(A_ , A_ , output.predictions , "predict" ) UpperCamelCase : List[str] = self.compute_metrics(A_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCamelCase : str = metrics.pop(A_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_ )
code_codestyle: 52

style_context:
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Union[str, Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : str = type_vocab_size UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : Dict = initializer_range UpperCamelCase : int = num_labels UpperCamelCase : Optional[int] = scope UpperCamelCase : int = range_bbox def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase : Union[str, Any] = bbox[i, j, 3] UpperCamelCase : int = bbox[i, j, 1] UpperCamelCase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase : List[str] = bbox[i, j, 2] UpperCamelCase : Optional[int] = bbox[i, j, 0] UpperCamelCase : Optional[Any] = t UpperCamelCase : Dict = None if self.use_input_mask: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase : str = None if self.use_token_type_ids: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ ) UpperCamelCase : Any = model(A_ , bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = self.num_labels UpperCamelCase : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs UpperCamelCase : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' return True def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = LiltModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : Union[str, Any] = type 
self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ ) UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ ) UpperCamelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ ) UpperCamelCase : List[str] = torch.Size([1, 2, 768] ) UpperCamelCase : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , ) self.assertTrue(outputs.last_hidden_state.shape , A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
style_context_codestyle: 52
label: 1

Row 2, code:
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Tuple = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class A__ ( __snake_case ): def __init__( self , A_=None , A_=None , *A_ , **A_ ): '''simple docstring''' super().__init__(*A_ , **A_ ) if config is None: assert isinstance(self.model , A_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) UpperCamelCase : Optional[Any] = self.model.config else: UpperCamelCase : int = config UpperCamelCase : Tuple = data_args UpperCamelCase : List[str] = self.config.tgt_vocab_size if isinstance(self.config , A_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for""" " padding.." 
) if self.args.label_smoothing == 0: UpperCamelCase : Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss UpperCamelCase : Optional[Any] = label_smoothed_nll_loss def __UpperCamelCase( self , A_ ): '''simple docstring''' if self.optimizer is None: UpperCamelCase : int = ["bias", "LayerNorm.weight"] UpperCamelCase : Any = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] UpperCamelCase : Optional[int] = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: UpperCamelCase : Dict = Adafactor UpperCamelCase : List[Any] = {"scale_parameter": False, "relative_step": False} else: UpperCamelCase : Optional[int] = AdamW UpperCamelCase : List[Any] = { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } UpperCamelCase : Optional[int] = self.args.learning_rate if self.sharded_ddp: UpperCamelCase : Dict = OSS( params=A_ , optim=A_ , **A_ , ) else: UpperCamelCase : Dict = optimizer_cls(A_ , **A_ ) if self.lr_scheduler is None: UpperCamelCase : int = self._get_lr_scheduler(A_ ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": UpperCamelCase : Any = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": UpperCamelCase : List[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: UpperCamelCase : str = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A_ ) return scheduler def __UpperCamelCase( self ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token UpperCamelCase : List[Any] = model(**A_ , use_cache=A_ )[0] UpperCamelCase : Union[str, Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models UpperCamelCase , UpperCamelCase : str = model(**A_ , labels=A_ , use_cache=A_ )[:2] else: # compute label smoothed loss UpperCamelCase : Tuple = model(**A_ , use_cache=A_ )[0] UpperCamelCase : List[str] = torch.nn.functional.log_softmax(A_ , dim=-1 ) UpperCamelCase , UpperCamelCase : Dict = self.loss_fn(A_ , A_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Dict = inputs.pop("labels" ) UpperCamelCase , UpperCamelCase : int = self._compute_loss(A_ , A_ , A_ ) return loss def 
__UpperCamelCase( self , A_ , A_ , A_ , A_ = None , ): '''simple docstring''' UpperCamelCase : str = self._prepare_inputs(A_ ) UpperCamelCase : Optional[Any] = { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: UpperCamelCase : Optional[Any] = self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: UpperCamelCase : Tuple = self._pad_tensors_to_max_len(A_ , gen_kwargs["max_length"] ) UpperCamelCase : List[str] = inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data UpperCamelCase , UpperCamelCase : str = self._compute_loss(A_ , A_ , A_ ) UpperCamelCase : Dict = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) UpperCamelCase : Optional[int] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: UpperCamelCase : Dict = self._pad_tensors_to_max_len(A_ , gen_kwargs["max_length"] ) return (loss, logits, labels) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" F""" padded to `max_length`={max_length}""" ) UpperCamelCase : List[Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) UpperCamelCase : Dict = tensor return padded_tensor
code_codestyle: 52

style_context:
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __lowerCamelCase : Union[str, Any] = pytest.mark.integration @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} ) return dset def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() UpperCamelCase : List[Any] = dset.map( lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ ) UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch UpperCamelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} UpperCamelCase : Optional[Any] = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=A_ ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[Any] = 1 UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ ) self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1] UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ ) self.assertRaises(A_ , index.search_batch , queries[0] ) UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores] UpperCamelCase : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(A_ ): UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dict = faiss.IndexFlat(5 ) UpperCamelCase : Union[str, 
Any] = FaissIndex(custom_index=A_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: index.save(tmp_file.name ) UpperCamelCase : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase : str = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : int = 1 UpperCamelCase , UpperCamelCase : Dict = index.search(A_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def A_ ( _lowerCAmelCase ) -> Optional[int]: import faiss UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) UpperCamelCase : List[Any] = "index.faiss" UpperCamelCase : List[str] = F"""mock://{index_name}""" index.save(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[int] = 1 UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = Elasticsearch() UpperCamelCase : Union[str, Any] = {"acknowledged": True} UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query UpperCamelCase : str = "foo" UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout UpperCamelCase : Dict = "foo" UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries UpperCamelCase : Dict = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ ) UpperCamelCase : str = [scores[0] for scores in total_scores] UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ ) # batched queries with timeout UpperCamelCase : int = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , 
request_timeout=30 ) UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores] UpperCamelCase : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ )
style_context_codestyle: 52
label: 1

Row 3, code:
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:
    # This function is recursive.
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of recursion).
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
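As a quick sanity check on the function above, two hand-worked examples; the expected outputs follow from the recursion (keep every later element that is >= the chosen pivot, or restart below the first descent) and should be treated as illustrative.

print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))
# expected: [10, 22, 33, 41, 60, 80]
print(longest_subsequence([1, 1, 1]))
# expected: [1, 1, 1] (ties count as non-decreasing)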
code_codestyle: 52

style_context:
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
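The triple loop above counts rows of `length` units that use at least one tile of length 2, 3, or 4 (one colour per tile length), which matches Project Euler problem 116; its statement gives a worked case for a row of five units (7 + 3 + 2 ways), so, assuming the reconstruction above is faithful, the function should reproduce it:

print(solution(5))  # expected: 7 + 3 + 2 = 12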
style_context_codestyle: 52
label: 1

Row 4, code:
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = (EulerDiscreteScheduler,) _UpperCAmelCase :Optional[Any] = 1_0 def __UpperCamelCase( self , **A_ ): '''simple docstring''' UpperCamelCase : Any = { "num_train_timesteps": 1100, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**A_ ) return config def __UpperCamelCase( self ): '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.scheduler_classes[0] UpperCamelCase : Dict = self.get_scheduler_config() UpperCamelCase : str = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase : List[str] = torch.manual_seed(0 ) UpperCamelCase : int = self.dummy_model() UpperCamelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase : int = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase : Dict = scheduler.scale_model_input(A_ , A_ ) UpperCamelCase : Optional[Any] = model(A_ , A_ ) UpperCamelCase : Union[str, Any] = scheduler.step(A_ , A_ , A_ , generator=A_ ) UpperCamelCase : Any = output.prev_sample UpperCamelCase : Optional[int] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : List[Any] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 10.08_07 ) < 1e-2 assert abs(result_mean.item() - 0.01_31 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.scheduler_classes[0] UpperCamelCase : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction" ) UpperCamelCase : Dict = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase : Any = torch.manual_seed(0 ) UpperCamelCase : Tuple = self.dummy_model() UpperCamelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase : Dict = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase : Any = scheduler.scale_model_input(A_ , A_ ) UpperCamelCase : List[Any] = model(A_ , A_ ) UpperCamelCase : Optional[int] = scheduler.step(A_ , A_ , A_ , generator=A_ ) UpperCamelCase : List[Any] = output.prev_sample UpperCamelCase : List[str] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Tuple = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 0.00_02 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.scheduler_classes[0] UpperCamelCase : Optional[Any] = self.get_scheduler_config() UpperCamelCase : List[Any] = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps , device=A_ ) UpperCamelCase : Optional[Any] = torch.manual_seed(0 ) UpperCamelCase : List[str] = self.dummy_model() UpperCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCamelCase : str = 
sample.to(A_ ) for t in scheduler.timesteps: UpperCamelCase : Dict = scheduler.scale_model_input(A_ , A_ ) UpperCamelCase : List[Any] = model(A_ , A_ ) UpperCamelCase : Optional[int] = scheduler.step(A_ , A_ , A_ , generator=A_ ) UpperCamelCase : Dict = output.prev_sample UpperCamelCase : Tuple = torch.sum(torch.abs(A_ ) ) UpperCamelCase : List[str] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 10.08_07 ) < 1e-2 assert abs(result_mean.item() - 0.01_31 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.scheduler_classes[0] UpperCamelCase : Tuple = self.get_scheduler_config() UpperCamelCase : Optional[int] = scheduler_class(**A_ , use_karras_sigmas=A_ ) scheduler.set_timesteps(self.num_inference_steps , device=A_ ) UpperCamelCase : Any = torch.manual_seed(0 ) UpperCamelCase : str = self.dummy_model() UpperCamelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCamelCase : Union[str, Any] = sample.to(A_ ) for t in scheduler.timesteps: UpperCamelCase : Any = scheduler.scale_model_input(A_ , A_ ) UpperCamelCase : List[Any] = model(A_ , A_ ) UpperCamelCase : Union[str, Any] = scheduler.step(A_ , A_ , A_ , generator=A_ ) UpperCamelCase : str = output.prev_sample UpperCamelCase : Optional[Any] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : List[str] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1e-2 assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1e-3
code_codestyle: 52

style_context:
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
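A brief usage sketch of the keyword cipher above; the expected strings are worked out by hand from the mapping logic (the deduplicated key "GODBYE" fills the front of the cipher alphabet and the remaining letters shift around it), so treat them as illustrative.

cipher_map = create_cipher_map("Goodbye!!")
print(encipher("Hello World!!", cipher_map))  # expected: CYJJM VMQJB!!
print(decipher("CYJJM VMQJB!!", cipher_map))  # expected: HELLO WORLD!!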
style_context_codestyle: 52
label: 1

Row 5, code:
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __lowerCamelCase : Dict = logging.get_logger(__name__) class A__ : def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = question_encoder UpperCamelCase : int = generator UpperCamelCase : Optional[int] = self.question_encoder def __UpperCamelCase( self , A_ ): '''simple docstring''' if os.path.isfile(A_ ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(A_ , exist_ok=A_ ) UpperCamelCase : List[Any] = os.path.join(A_ , "question_encoder_tokenizer" ) UpperCamelCase : Union[str, Any] = os.path.join(A_ , "generator_tokenizer" ) self.question_encoder.save_pretrained(A_ ) self.generator.save_pretrained(A_ ) @classmethod def __UpperCamelCase( cls , A_ , **A_ ): '''simple docstring''' from ..auto.tokenization_auto import AutoTokenizer UpperCamelCase : str = kwargs.pop("config" , A_ ) if config is None: UpperCamelCase : List[Any] = RagConfig.from_pretrained(A_ ) UpperCamelCase : Tuple = AutoTokenizer.from_pretrained( A_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" ) UpperCamelCase : str = AutoTokenizer.from_pretrained( A_ , config=config.generator , subfolder="generator_tokenizer" ) return cls(question_encoder=A_ , generator=A_ ) def __call__( self , *A_ , **A_ ): '''simple docstring''' return self.current_tokenizer(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.generator.batch_decode(*A_ , **A_ ) def __UpperCamelCase( self , *A_ , **A_ ): '''simple docstring''' return self.generator.decode(*A_ , **A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.question_encoder def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.generator def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = "longest" , A_ = None , A_ = True , **A_ , ): '''simple docstring''' warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. See the documentation of your specific tokenizer for more " "details" , A_ , ) if max_length is None: UpperCamelCase : str = self.current_tokenizer.model_max_length UpperCamelCase : Dict = self( A_ , add_special_tokens=A_ , return_tensors=A_ , max_length=A_ , padding=A_ , truncation=A_ , **A_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: UpperCamelCase : Optional[int] = self.current_tokenizer.model_max_length UpperCamelCase : str = self( text_target=A_ , add_special_tokens=A_ , return_tensors=A_ , padding=A_ , max_length=A_ , truncation=A_ , **A_ , ) UpperCamelCase : List[str] = labels["input_ids"] return model_inputs
code_codestyle: 52

style_context:
from sklearn.metrics import fa_score import datasets __lowerCamelCase : List[Any] = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __lowerCamelCase : List[Any] = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ __lowerCamelCase : str = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ): '''simple docstring''' UpperCamelCase : List[str] = fa_score( A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ ) return {"f1": float(A_ ) if score.size == 1 else score}
style_context_codestyle: 52
label: 1

Row 6, code:
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : Optional[Any] = 25_6047 __lowerCamelCase : List[str] = 25_6145 @require_sentencepiece @require_tokenizers class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Dict = NllbTokenizer _UpperCAmelCase :List[str] = NllbTokenizerFast _UpperCAmelCase :Any = True _UpperCAmelCase :str = True _UpperCAmelCase :str = {} def __UpperCamelCase( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase : str = NllbTokenizer(A_ , keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = NllbTokenizer(A_ , keep_accents=A_ ) UpperCamelCase : List[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(A_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCamelCase : str = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) UpperCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(A_ , **A_ ) UpperCamelCase : Dict = tempfile.mkdtemp() UpperCamelCase : List[Any] = tokenizer_r.save_pretrained(A_ ) UpperCamelCase : int = tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) UpperCamelCase : int = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(A_ , A_ ) # Checks everything loads correctly in the same way UpperCamelCase : Any = tokenizer_r.from_pretrained(A_ 
) UpperCamelCase : Tuple = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ , A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=True UpperCamelCase : str = tempfile.mkdtemp() UpperCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(A_ , legacy_format=A_ ) UpperCamelCase : Dict = tokenizer_p.save_pretrained(A_ ) # Checks it save with the same files self.assertSequenceEqual(A_ , A_ ) # Checks everything loads correctly in the same way UpperCamelCase : Dict = tokenizer_r.from_pretrained(A_ ) UpperCamelCase : Dict = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ , A_ ) ) shutil.rmtree(A_ ) # Save tokenizer rust, legacy_format=False UpperCamelCase : int = tempfile.mkdtemp() UpperCamelCase : Optional[Any] = tokenizer_r.save_pretrained(A_ , legacy_format=A_ ) UpperCamelCase : List[Any] = tokenizer_p.save_pretrained(A_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCamelCase : Optional[int] = tokenizer_r.from_pretrained(A_ ) UpperCamelCase : Dict = tokenizer_p.from_pretrained(A_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A_ , A_ ) ) shutil.rmtree(A_ ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' if not self.test_seqaseq: return UpperCamelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. UpperCamelCase : int = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] UpperCamelCase : Dict = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi" " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: UpperCamelCase : List[str] = tokenizer.prepare_seqaseq_batch( src_texts=A_ , tgt_texts=A_ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified UpperCamelCase : Union[str, Any] = tokenizer.prepare_seqaseq_batch( A_ , tgt_texts=A_ , max_length=3 , return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) UpperCamelCase : str = tokenizer.prepare_seqaseq_batch( src_texts=A_ , max_length=3 , max_target_length=10 , return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("decoder_input_ids" , A_ ) @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." 
) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCamelCase : Dict = [AddedToken("<special>" , lstrip=A_ )] UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , **A_ ) UpperCamelCase : Union[str, Any] = tokenizer_r.encode("Hey this is a <special> token" ) UpperCamelCase : List[Any] = tokenizer_r.encode("<special>" , add_special_tokens=A_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase : Tuple = self.tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , **A_ ) UpperCamelCase : List[str] = tokenizer_p.encode("Hey this is a <special> token" ) UpperCamelCase : Optional[int] = tokenizer_cr.encode("Hey this is a <special> token" ) self.assertEqual(A_ , A_ ) self.assertEqual(A_ , A_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): _UpperCAmelCase :List[Any] = 'facebook/nllb-200-distilled-600M' _UpperCAmelCase :Optional[Any] = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _UpperCAmelCase :Optional[int] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _UpperCAmelCase :Any = [ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def __UpperCamelCase( cls ): '''simple docstring''' UpperCamelCase : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" ) UpperCamelCase : int = 1 return cls def __UpperCamelCase( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_6057 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.assertIn(A_ , self.tokenizer.all_special_ids ) # fmt: off UpperCamelCase : str = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on UpperCamelCase : Optional[Any] = self.tokenizer.decode(A_ , skip_special_tokens=A_ ) UpperCamelCase : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A_ ) self.assertEqual(A_ , A_ ) self.assertNotIn(self.tokenizer.eos_token , A_ ) def 
__UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , A_ ) UpperCamelCase : str = 10 UpperCamelCase : Dict = self.tokenizer(A_ , max_length=A_ , truncation=A_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , A_ ) self.assertEqual(len(A_ ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_6203, 3] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = tempfile.mkdtemp() UpperCamelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A_ ) UpperCamelCase : List[str] = NllbTokenizer.from_pretrained(A_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A_ ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) UpperCamelCase : Optional[Any] = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] ) self.assertIsInstance(A_ , A_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) UpperCamelCase : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A_ ) self.assertEqual(A_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.tokenizer(self.src_text , padding=A_ , truncation=A_ , max_length=3 , return_tensors="pt" ) UpperCamelCase : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=10 , return_tensors="pt" ) UpperCamelCase : str = targets["input_ids"] UpperCamelCase : List[Any] = shift_tokens_right( A_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( nested_simplify(A_ ) , { # A, test, EOS, en_XX "input_ids": [[25_6047, 70, 7356, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 25_6057, } , ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = True UpperCamelCase : List[Any] = self.tokenizer( "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) UpperCamelCase : Optional[Any] = False UpperCamelCase : List[Any] = self.tokenizer( "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
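# Usage sketch for the tokenizer behaviour the tests above pin down.
# Illustrative only: assumes network access and the same
# "facebook/nllb-200-distilled-600M" checkpoint the integration tests use.
#
#     from transformers import NllbTokenizer
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#     )
#     batch = tokenizer(
#         ["UN Chief says there is no military solution in Syria"],
#         text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
#         return_tensors="pt",
#     )
#     # Whether the language code is prepended or appended to the token ids
#     # depends on tokenizer.legacy_behaviour, as the final test above demonstrates.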
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], 
"vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=[1, 1, 2] , A_=1 , A_=32 , A_=4 , A_=8 , A_=37 , A_="gelu_new" , A_=0.1 , A_=0.1 , A_=0.0 , A_=512 , A_=3 , A_=0.02 , A_=3 , A_=4 , A_=None , A_=False , ): '''simple docstring''' UpperCamelCase : int = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : List[Any] = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Optional[Any] = use_input_mask UpperCamelCase : Any = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : List[str] = vocab_size UpperCamelCase : Tuple = block_sizes UpperCamelCase : List[Any] = num_decoder_layers UpperCamelCase : Optional[Any] = d_model UpperCamelCase : List[Any] = n_head UpperCamelCase : Union[str, Any] = d_head UpperCamelCase : Tuple = d_inner UpperCamelCase : str = hidden_act UpperCamelCase : List[Any] = hidden_dropout UpperCamelCase : Any = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Optional[int] = max_position_embeddings UpperCamelCase : Tuple = type_vocab_size UpperCamelCase : List[str] = 2 UpperCamelCase : Optional[Any] = num_labels UpperCamelCase : Union[str, Any] = num_choices UpperCamelCase : Tuple = scope UpperCamelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer UpperCamelCase : str = n_head # Used in the tests to check the size of the first hidden state UpperCamelCase : Optional[Any] = self.d_model # Used in the tests to check the number of output hidden states/attentions UpperCamelCase : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: UpperCamelCase : Union[str, Any] = self.num_hidden_layers + 2 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : int = None if self.use_input_mask: UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Any = None if self.use_token_type_ids: UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : List[Any] = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : Optional[int] = None if self.use_labels: UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Dict = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[Any] = TFFunnelModel(config=A_ ) UpperCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : List[str] = model(A_ ) UpperCamelCase : Union[str, Any] = [input_ids, input_mask] UpperCamelCase : Tuple = model(A_ ) UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) UpperCamelCase : int = False UpperCamelCase : Optional[int] = TFFunnelModel(config=A_ ) UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) UpperCamelCase : Union[str, Any] = False UpperCamelCase : str = TFFunnelModel(config=A_ ) UpperCamelCase : List[Any] = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[str] = TFFunnelBaseModel(config=A_ ) UpperCamelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Optional[int] = model(A_ ) UpperCamelCase : List[Any] = [input_ids, input_mask] UpperCamelCase : int = model(A_ ) UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) UpperCamelCase : str = False UpperCamelCase : Union[str, Any] = TFFunnelBaseModel(config=A_ ) UpperCamelCase : Optional[int] = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) UpperCamelCase : Union[str, Any] = False UpperCamelCase : List[Any] = TFFunnelBaseModel(config=A_ ) UpperCamelCase : Dict = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def __UpperCamelCase( self , A_ , 
A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = TFFunnelForPreTraining(config=A_ ) UpperCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = TFFunnelForMaskedLM(config=A_ ) UpperCamelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : List[Any] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[Any] = self.num_labels UpperCamelCase : Dict = TFFunnelForSequenceClassification(config=A_ ) UpperCamelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Any = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[Any] = self.num_choices UpperCamelCase : str = TFFunnelForMultipleChoice(config=A_ ) UpperCamelCase : int = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : str = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) UpperCamelCase : Any = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCamelCase : List[str] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[Any] = self.num_labels UpperCamelCase : Optional[Any] = TFFunnelForTokenClassification(config=A_ ) UpperCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : List[str] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = TFFunnelForQuestionAnswering(config=A_ ) UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Dict = config_and_inputs UpperCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, 
TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) _UpperCAmelCase :Dict = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Tuple = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = TFFunnelModelTester(self ) UpperCamelCase : List[str] = ConfigTester(self , config_class=A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @require_tf class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) _UpperCAmelCase :List[Any] = False _UpperCAmelCase :List[Any] = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = TFFunnelModelTester(self , base=A_ ) UpperCamelCase : Dict = ConfigTester(self , config_class=A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_ )
class DisjointSet:
    """Disjoint set (union by rank, with path compression) that tracks the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of disj_set, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
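# Quick demo of the class above (a sketch): four singleton sets of size 1,
# merged pairwise and then together.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(2, 3)
    disjoint_set.merge(1, 3)
    print(disjoint_set.get_parent(0) == disjoint_set.get_parent(2))  # True: all four joined
    print(disjoint_set.max_set)  # 4: the largest set now contains every element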
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class A__ ( __snake_case ): def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = params UpperCamelCase : Union[str, Any] = np.array(A_ ) UpperCamelCase : Optional[int] = np.array([len(A_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A_ ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def __UpperCamelCase( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.params.max_model_input_size UpperCamelCase : Dict = self.lengths > max_len logger.info(F"""Splitting {sum(A_ )} too long sequences.""" ) def divide_chunks(A_ , A_ ): return [l[i : i + n] for i in range(0 , len(A_ ) , A_ )] UpperCamelCase : List[str] = [] UpperCamelCase : Tuple = [] if self.params.mlm: UpperCamelCase , UpperCamelCase : Dict = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: UpperCamelCase , UpperCamelCase : Dict = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: UpperCamelCase : Dict = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: UpperCamelCase : Union[str, Any] = np.insert(A_ , 0 , A_ ) if sub_s[-1] != sep_id: UpperCamelCase : Optional[int] = np.insert(A_ , len(A_ ) , A_ ) assert len(A_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A_ ) new_tok_ids.extend(A_ ) new_lengths.extend([len(A_ ) for l in sub_seqs] ) UpperCamelCase : Union[str, Any] = np.array(A_ ) UpperCamelCase : Union[str, Any] = np.array(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = len(self ) UpperCamelCase : Dict = self.lengths > 11 UpperCamelCase : List[str] = self.token_ids[indices] UpperCamelCase : Tuple = self.lengths[indices] UpperCamelCase : Optional[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def __UpperCamelCase( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: UpperCamelCase : List[str] = self.params.special_tok_ids["unk_token"] UpperCamelCase : int = len(self ) UpperCamelCase : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) UpperCamelCase : List[Any] = (unk_occs / self.lengths) < 0.5 UpperCamelCase : List[Any] = self.token_ids[indices] UpperCamelCase : str = self.lengths[indices] UpperCamelCase : Dict = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def __UpperCamelCase( self ): '''simple docstring''' if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = 
sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : str = [t[0] for t in batch] UpperCamelCase : str = [t[1] for t in batch] assert len(A_ ) == len(A_ ) # Max for paddings UpperCamelCase : Union[str, Any] = max(A_ ) # Pad token ids if self.params.mlm: UpperCamelCase : List[Any] = self.params.special_tok_ids["pad_token"] else: UpperCamelCase : Dict = self.params.special_tok_ids["unk_token"] UpperCamelCase : Optional[int] = [list(t.astype(A_ ) ) + [pad_idx] * (max_seq_len_ - len(A_ )) for t in token_ids] assert len(tk_ ) == len(A_ ) assert all(len(A_ ) == max_seq_len_ for t in tk_ ) UpperCamelCase : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) UpperCamelCase : Any = torch.tensor(A_ ) # (bs) return tk_t, lg_t
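# Typical consumption of the dataset above (a sketch). Upstream, in the
# transformers distillation example, this class is LmSeqsDataset and the
# collate method is batch_sequences; those names are assumptions here since
# they are obfuscated above:
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params=params, data=token_ids)
#     loader = DataLoader(dataset, batch_size=32, shuffle=True,
#                         collate_fn=dataset.batch_sequences)
#     for token_ids, lengths in loader:
#         ...  # token_ids: (bs, max_seq_len_) LongTensor, lengths: (bs,)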
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
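# Illustrative usage sketch (assumes a transformers version that ships
# ConvNextV2); out_features selects which stages a backbone exposes:
#
#     config = ConvNextV2Config(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
#     print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     print(config.out_features)  # ['stage2', 'stage4']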
import sys
from collections import defaultdict


class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the value at `start` down so the min-heap property holds again."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1
            temp = self.get_position(positions[smallest_child])
            self.set_position(positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Bubble `val` up from `index` until its parent is no larger."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
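# Example run without stdin (a sketch; same adjacency-list shape the
# interactive block above builds):
#
#     graph = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4), (2, 3, 1)]:
#         graph[u].append([v, w])
#         graph[v].append([u, w])
#     print(prisms_algorithm(graph))  # -> [(0, 1), (1, 2), (2, 3)]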
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
def count_inversions_bf(arr):
    """Count inversions by brute force: O(n^2) pairwise comparison."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort: O(n log n). Returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # because P is sorted; all of those pairs are inversions.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
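# Sketch of what the lazy module buys: importing the package is cheap, and the
# torch-backed symbols are only materialized on first attribute access via
# _LazyModule.__getattr__. The exact dotted path below is an assumption that
# depends on the installed transformers version:
#
#     from transformers.models.deprecated import mmbt   # no torch-side import yet
#     model_cls = mmbt.MMBTModel                        # now modeling_mmbt is imported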
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, 
Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3

    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
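# Usage sketch (not part of the original file): loading this combined metric
# through `datasets` and scoring one toy prediction. The metric name
# "wiki_split" is an assumption inferred from the sari/sacrebleu/exact
# combination above; the example strings are illustrative only.
if __name__ == "__main__":
    import datasets as _datasets

    wiki_split = _datasets.load_metric("wiki_split")
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print(wiki_split.compute(sources=sources, predictions=predictions, references=references))
    # -> {'sari': ..., 'sacrebleu': ..., 'exact': ...}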
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] is the length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
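# Quick checks (illustrative, not from the original file): the function returns
# the longest contiguous run of characters shared by both inputs, and the empty
# string when there is no overlap.
assert longest_common_substring("abcdjkl", "zxabcz") == "abc"
assert longest_common_substring("", "abc") == ""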
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
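# Usage sketch (illustrative): instantiating the config and a randomly
# initialized encoder from it; the hyperparameter overrides are arbitrary.
if __name__ == "__main__":
    from transformers import RobertaConfig, RobertaModel

    config = RobertaConfig(num_hidden_layers=6, hidden_size=384, num_attention_heads=6)
    model = RobertaModel(config)  # architecture from the config, untrained weights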
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
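# Illustrative sketch (not part of the file): with the `_LazyModule` pattern
# above, importing `transformers` stays cheap; the torch-backed submodule is
# only imported the first time one of its attributes is accessed.
if __name__ == "__main__":
    from transformers import BigBirdPegasusConfig  # this attribute access resolves the lazy module

    config = BigBirdPegasusConfig()
    print(config.model_type)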
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
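# Usage sketch (illustrative): unconditional audio generation with the pipeline
# above. "harmonai/maestro-150k" is one public Dance Diffusion checkpoint; treat
# the exact name as an assumption.
if __name__ == "__main__":
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios
    print(audios[0].shape)  # (channels, samples)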
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase : Optional[int] = 16 __lowerCamelCase : Union[str, Any] = 32 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 16 ) -> Any: UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCamelCase : Dict = DatasetDict( { "train": dataset["train"].select(_lowerCAmelCase ), "validation": dataset["train"].select(_lowerCAmelCase ), "test": dataset["validation"], } ) def tokenize_function(_lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase : str = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase : Any = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase : Dict = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase : Optional[Any] = 16 elif accelerator.mixed_precision != "no": UpperCamelCase : List[Any] = 8 else: UpperCamelCase : Tuple = None return tokenizer.pad( _lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
UpperCamelCase : List[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) UpperCamelCase : str = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) UpperCamelCase : Dict = DataLoader( tokenized_datasets["test"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader, test_dataloader def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: # New Code # UpperCamelCase : Dict = [] # Download the dataset UpperCamelCase : Optional[Any] = load_dataset("glue" , "mrpc" ) # Create our splits UpperCamelCase : Any = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator UpperCamelCase : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase : int = config["lr"] UpperCamelCase : Union[str, Any] = int(config["num_epochs"] ) UpperCamelCase : List[Any] = int(config["seed"] ) UpperCamelCase : Optional[int] = int(config["batch_size"] ) UpperCamelCase : int = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation UpperCamelCase : List[str] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCamelCase : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE UpperCamelCase : Optional[int] = MAX_GPU_BATCH_SIZE set_seed(_lowerCAmelCase ) # New Code # # Create our folds: UpperCamelCase : Tuple = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] ) UpperCamelCase : Dict = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(_lowerCAmelCase ): UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = get_fold_dataloaders( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase : int = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase : str = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase : Tuple = AdamW(params=model.parameters() , lr=_lowerCAmelCase ) # Instantiate scheduler UpperCamelCase : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Now we train the model for epoch in range(_lowerCAmelCase ): model.train() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) UpperCamelCase : str = model(**_lowerCAmelCase ) UpperCamelCase : Optional[Any] = outputs.loss UpperCamelCase : Union[str, Any] = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase : List[str] = model(**_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 ) UpperCamelCase , UpperCamelCase : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCAmelCase , references=_lowerCAmelCase , ) UpperCamelCase : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _lowerCAmelCase ) # New Code # # We also run predictions on the test set at the very end UpperCamelCase : Union[str, Any] = [] for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase : Optional[int] = model(**_lowerCAmelCase ) UpperCamelCase : Optional[int] = outputs.logits UpperCamelCase , UpperCamelCase : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(_lowerCAmelCase , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: UpperCamelCase : Optional[int] = torch.cat(_lowerCAmelCase , dim=0 ) UpperCamelCase : Optional[int] = torch.stack(_lowerCAmelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) UpperCamelCase : Optional[Any] = metric.compute(predictions=_lowerCAmelCase , references=_lowerCAmelCase ) accelerator.print("Average test metrics from all folds:" , _lowerCAmelCase ) def A_ ( ) -> int: UpperCamelCase : Optional[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) # New Code # parser.add_argument("--num_folds" , type=_lowerCAmelCase , default=3 , help="The number of splits to perform across the dataset" ) UpperCamelCase : int = parser.parse_args() UpperCamelCase : Union[str, Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
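# Launch sketch (illustrative, not part of the original script): like the other
# `accelerate` examples referenced in the header, this script is started through
# the accelerate CLI. The script filename below is an assumption; the fold count
# maps to the `--num_folds` argument defined above.
#
#   accelerate config                                              # one-time interactive setup
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16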
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
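# Quick checks (illustrative; the top-level function name was restored above and
# is an assumption): the classic Levenshtein example needs three edits
# ("kitten" -> "sitten" -> "sittin" -> "sitting").
assert min_distance_up_bottom("kitten", "sitting") == 3
assert min_distance_up_bottom("", "abc") == 3  # pure insertions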
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A__ ( __snake_case , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class A__ ( unittest.TestCase ): @property def __UpperCamelCase( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = ort.SessionOptions() UpperCamelCase : Any = False return options def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) UpperCamelCase : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) UpperCamelCase : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = "A red cat sitting on a park bench" UpperCamelCase : List[Any] = np.random.RandomState(0 ) UpperCamelCase : Union[str, Any] = pipe( prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="np" , ) UpperCamelCase : Optional[int] = output.images UpperCamelCase : Any = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCamelCase : Dict = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) UpperCamelCase : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) UpperCamelCase : Optional[Any] = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" ) UpperCamelCase : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Optional[int] = "A red cat sitting on a park bench" UpperCamelCase : List[str] = np.random.RandomState(0 ) UpperCamelCase : Union[str, Any] = pipe( prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="np" , ) UpperCamelCase : Optional[Any] = output.images UpperCamelCase : List[str] = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCamelCase : List[Any] = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 
0.01_39, 0.00_94, 0.00_97, 0.01_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
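# Usage sketch (illustrative, outside the test suite): the feature extractor
# under test applied to one second of random audio; the (1, 1024, 128) output
# shape matches the assertion in the integration test above.
if __name__ == "__main__":
    import numpy as np
    from transformers import ASTFeatureExtractor

    feature_extractor = ASTFeatureExtractor()
    waveform = np.random.rand(16000).astype(np.float32)  # 1 s at 16 kHz
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    print(inputs["input_values"].shape)  # torch.Size([1, 1024, 128])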
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
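# Usage sketch (illustrative): the public API re-exported above in action.
if __name__ == "__main__":
    from datasets import load_dataset

    mrpc_train = load_dataset("glue", "mrpc", split="train")
    print(mrpc_train[0])  # one example as a dict of column -> value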
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): 
UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - 
self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :int = KandinskyVaaControlnetPipeline _UpperCAmelCase :Dict = ['image_embeds', 'negative_image_embeds', 'hint'] _UpperCAmelCase :Tuple = ['image_embeds', 'negative_image_embeds', 'hint'] _UpperCAmelCase :str = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :Optional[int] = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : Dict = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Any = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.dummy_unet UpperCamelCase : Optional[int] = self.dummy_movq UpperCamelCase : Optional[int] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : List[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( A_ ) # create hint UpperCamelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Union[str, Any] = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Dict = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = "cpu" UpperCamelCase : Dict = self.get_dummy_components() UpperCamelCase : int = self.pipeline_class(**A_ ) UpperCamelCase : Any = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : int = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : Tuple = output.images UpperCamelCase : List[str] = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : Tuple = image[0, -3:, -3:, -1] UpperCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : str = np.array( [0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) UpperCamelCase : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) UpperCamelCase : Optional[int] = torch.from_numpy(np.array(A_ ) ).float() / 2_55.0 UpperCamelCase : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) UpperCamelCase : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Optional[int] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) UpperCamelCase : Tuple = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : str = "A robot, 4k photo" UpperCamelCase : Optional[Any] = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Dict = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCamelCase : str = pipeline( image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , ) UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A_ , A_ )
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'bart' _UpperCAmelCase :str = ['past_key_values'] _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ): '''simple docstring''' UpperCamelCase : int = vocab_size UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = encoder_attention_heads UpperCamelCase : Optional[int] = decoder_ffn_dim UpperCamelCase : List[str] = decoder_layers UpperCamelCase : Optional[int] = decoder_attention_heads UpperCamelCase : int = dropout UpperCamelCase : int = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : int = init_std UpperCamelCase : List[Any] = encoder_layerdrop UpperCamelCase : List[str] = decoder_layerdrop UpperCamelCase : Dict = classifier_dropout UpperCamelCase : Optional[int] = use_cache UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): UpperCamelCase : int = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase : List[str] = {0: "batch"} UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers for i in range(A_ ): UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Tuple = super().outputs else: UpperCamelCase : Dict = super(A_ , self ).outputs if self.use_past: UpperCamelCase , UpperCamelCase : int = self.num_layers for i in range(A_ ): UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs UpperCamelCase : List[Any] = seq_length if not self.use_past else 1 UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase : List[Any] = dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads UpperCamelCase : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : List[Any] = decoder_seq_length + 3 UpperCamelCase : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase : int = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers UpperCamelCase : Any = min(A_ , A_ ) UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. 
UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads UpperCamelCase : str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase : int = torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) UpperCamelCase : Optional[Any] = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ ) UpperCamelCase : int = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) else: UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
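# Usage sketch (illustrative): building a small randomly initialized BART from
# this config; the ONNX config classes above supply the matching dummy inputs
# when exporting such a model. The hyperparameter overrides are arbitrary.
if __name__ == "__main__":
    from transformers import BartConfig, BartForConditionalGeneration

    config = BartConfig(
        encoder_layers=2, decoder_layers=2, d_model=256, encoder_attention_heads=4, decoder_attention_heads=4
    )
    model = BartForConditionalGeneration(config)
    print(f"{model.num_parameters():,} parameters")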
import math from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Optional[int] = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class A__ ( __snake_case ): _UpperCAmelCase :Any = 'data2vec-audio' def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=16 , A_=19 , A_=5 , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_="sum" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=False , A_=3 , A_=2 , A_=3 , A_=None , **A_ , ): '''simple docstring''' super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ ) UpperCamelCase : str = hidden_size UpperCamelCase : Optional[Any] = feat_extract_activation UpperCamelCase : Optional[Any] = list(A_ ) UpperCamelCase : Optional[int] = list(A_ ) UpperCamelCase : Optional[Any] = list(A_ ) UpperCamelCase : Optional[Any] = conv_bias UpperCamelCase : int = num_conv_pos_embeddings UpperCamelCase : List[Any] = num_conv_pos_embedding_groups UpperCamelCase : int = conv_pos_kernel_size UpperCamelCase : Any = len(self.conv_dim ) UpperCamelCase : int = num_hidden_layers UpperCamelCase : int = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : int = num_attention_heads UpperCamelCase : Any = hidden_dropout UpperCamelCase : Dict = attention_dropout UpperCamelCase : List[str] = activation_dropout UpperCamelCase : Optional[Any] = feat_proj_dropout UpperCamelCase : List[str] = final_dropout UpperCamelCase : Union[str, Any] = layerdrop UpperCamelCase : Tuple = layer_norm_eps UpperCamelCase : List[Any] = initializer_range UpperCamelCase : str = vocab_size UpperCamelCase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase : Tuple = mask_time_prob UpperCamelCase : Dict = mask_time_length UpperCamelCase : Dict = mask_time_min_masks UpperCamelCase : int = mask_feature_prob UpperCamelCase : Tuple = mask_feature_length UpperCamelCase : Tuple = mask_feature_min_masks # ctc loss UpperCamelCase : Union[str, Any] = ctc_loss_reduction UpperCamelCase : Tuple = ctc_zero_infinity # adapter UpperCamelCase : Any = add_adapter UpperCamelCase : List[Any] = adapter_kernel_size UpperCamelCase : Optional[int] = adapter_stride UpperCamelCase : int = num_adapter_layers UpperCamelCase : List[str] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCamelCase : Tuple = classifier_proj_size # XVector-specific parameters. 
# Feel free to ignore for other classes.
UpperCamelCase : Any = list(A_ ) UpperCamelCase : Tuple = list(A_ ) UpperCamelCase : Optional[int] = list(A_ ) UpperCamelCase : Union[str, Any] = xvector_output_dim @property def __UpperCamelCase( self ): '''simple docstring''' return math.prod(self.conv_stride )
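# Hedged usage sketch (illustrative, not from the original file): with the
# default convolutional strides (5, 2, 2, 2, 2, 2, 2) above, the property
# reports a total audio downsampling factor of 5 * 2**6 = 320.
from transformers import Data2VecAudioConfig
import math

config = Data2VecAudioConfig()
assert math.prod(config.conv_stride) == 320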
from math import sqrt def A_ ( _lowerCAmelCase ) -> bool: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' must been an int and positive" UpperCamelCase : List[Any] = True # 0 and 1 are none primes. if number <= 1: UpperCamelCase : List[Any] = False for divisor in range(2 , int(round(sqrt(_lowerCAmelCase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: UpperCamelCase : Union[str, Any] = False break # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'status' must been from type bool" return status def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N UpperCamelCase : int = list(range(2 , n + 1 ) ) UpperCamelCase : Optional[int] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_lowerCAmelCase ) ): for j in range(i + 1 , len(_lowerCAmelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): UpperCamelCase : Tuple = 0 # filters actual prime numbers. UpperCamelCase : str = [x for x in begin_list if x != 0] # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Optional[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2" UpperCamelCase : str = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_lowerCAmelCase ): ans.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0" UpperCamelCase : Optional[Any] = [] # this list will be returns of the function. # potential prime number factors. 
UpperCamelCase : Tuple = 2 UpperCamelCase : str = number if number == 0 or number == 1: ans.append(_lowerCAmelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_lowerCAmelCase ): while quotient != 1: if is_prime(_lowerCAmelCase ) and (quotient % factor == 0): ans.append(_lowerCAmelCase ) quotient //= factor else: factor += 1 else: ans.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' must been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Any = prime_factorization(_lowerCAmelCase ) UpperCamelCase : List[Any] = max(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int" return ans def A_ ( _lowerCAmelCase ) -> Union[str, Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' must been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Dict = prime_factorization(_lowerCAmelCase ) UpperCamelCase : List[Any] = min(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int" return ans def A_ ( _lowerCAmelCase ) -> Optional[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _lowerCAmelCase ), "compare must been from type bool" return number % 2 == 0 def A_ ( _lowerCAmelCase ) -> List[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _lowerCAmelCase ), "compare must been from type bool" return number % 2 != 0 def A_ ( _lowerCAmelCase ) -> Any: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (number > 2) and is_even(_lowerCAmelCase ) ), "'number' must been an int, even and > 2" UpperCamelCase : List[str] = [] # this list will be returned # creates a list of prime numbers between 2 up to 'number' UpperCamelCase : Dict = get_prime_numbers(_lowerCAmelCase ) UpperCamelCase : Tuple = len(_lowerCAmelCase ) # run variable for while-loops. UpperCamelCase : Optional[int] = 0 UpperCamelCase : int = None # exit variable, to break up the loops UpperCamelCase : Union[str, Any] = True while i < len_pn and loop: UpperCamelCase : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: UpperCamelCase : Any = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (len(_lowerCAmelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contain two primes. And sum of elements must been eq 'number'" return ans def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer."
UpperCamelCase : Tuple = 0 while numbera != 0: UpperCamelCase : Tuple = numbera % numbera UpperCamelCase : Any = numbera UpperCamelCase : Union[str, Any] = rest # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." UpperCamelCase : Optional[int] = 1 # actual answer that will be returned. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' UpperCamelCase : List[Any] = prime_factorization(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = prime_factorization(_lowerCAmelCase ) elif numbera == 1 or numbera == 1: UpperCamelCase : Optional[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = max(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 UpperCamelCase : List[str] = [] # captured numbers in both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase ) UpperCamelCase : Tuple = prime_fac_a.count(_lowerCAmelCase ) for _ in range(max(_lowerCAmelCase , _lowerCAmelCase ) ): ans *= n else: UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): ans *= n done.append(_lowerCAmelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: UpperCamelCase : Any = prime_fac_a.count(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): ans *= n done.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def A_ ( _lowerCAmelCase ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'number' must been a positive int" UpperCamelCase : int = 0 UpperCamelCase : int = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_lowerCAmelCase ): ans += 1 # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and is_prime( _lowerCAmelCase ), "'ans' must been a prime number and from type int" return ans def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert ( is_prime(_lowerCAmelCase ) and is_prime(_lowerCAmelCase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" UpperCamelCase : str = p_number_a + 1 # jump to the next number UpperCamelCase : Dict = [] # this list will be returned. # if number is not prime then # fetch the next prime number. while not is_prime(_lowerCAmelCase ): number += 1 while number < p_number_a: ans.append(_lowerCAmelCase ) number += 1 # fetch the next prime number. while not is_prime(_lowerCAmelCase ): number += 1 # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ans[0] != p_number_a and ans[len(_lowerCAmelCase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans def A_ ( _lowerCAmelCase ) -> List[str]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1" UpperCamelCase : Dict = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_lowerCAmelCase ) # precondition assert ans[0] == 1 and ans[len(_lowerCAmelCase ) - 1] == n, "Error in function getDivisors(...)" return ans def A_ ( _lowerCAmelCase ) -> int: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number > 1 ), "'number' must been an int and > 1" UpperCamelCase : int = get_divisors(_lowerCAmelCase ) # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (divisors[0] == 1) and (divisors[len(_lowerCAmelCase ) - 1] == number) ), "Error in help-function getDivisors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. UpperCamelCase : List[str] = gcd(abs(_lowerCAmelCase ) , abs(_lowerCAmelCase ) ) # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def A_ ( _lowerCAmelCase ) -> Dict: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0" UpperCamelCase : str = 1 # this will be returned. for factor in range(1 , n + 1 ): ans *= factor return ans def A_ ( _lowerCAmelCase ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0" UpperCamelCase : Dict = 0 UpperCamelCase : Dict = 1 UpperCamelCase : Union[str, Any] = 1 # this will be returned for _ in range(n - 1 ): UpperCamelCase : Any = ans ans += fiba UpperCamelCase : str = tmp return ans
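# Hedged, self-contained restatement of two helpers above under their
# conventional names (the flattened dump renames every def to `A_`, so the
# originals cannot be called by name); the logic mirrors the code, not verbatim.
from math import sqrt

def is_prime(n: int) -> bool:
    # trial division up to sqrt(n), as in the sieve-free check above
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, int(sqrt(n)) + 1))

def prime_factorization(n: int) -> list[int]:
    factors, factor = [], 2
    while factor * factor <= n:
        while n % factor == 0:
            factors.append(factor)
            n //= factor
        factor += 1
    if n > 1:
        factors.append(n)
    return factors

print(is_prime(97))              # True
print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]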
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def A_ ( _lowerCAmelCase ) -> Optional[Any]: return EnvironmentCommand() def A_ ( _lowerCAmelCase ) -> List[str]: return EnvironmentCommand(args.accelerate_config_file ) class A__ ( __snake_case ): @staticmethod def __UpperCamelCase( A_ ): '''simple docstring''' UpperCamelCase : Tuple = parser.add_parser("env" ) download_parser.set_defaults(func=A_ ) download_parser.add_argument( "--accelerate-config_file" , default=A_ , help="The accelerate config file to use for the default values in the launching script." , ) download_parser.set_defaults(func=A_ ) def __init__( self , A_ , *A_ ): '''simple docstring''' UpperCamelCase : str = accelerate_config_file def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = "not installed" if is_safetensors_available(): import safetensors UpperCamelCase : Tuple = safetensors.__version__ elif importlib.util.find_spec("safetensors" ) is not None: import safetensors UpperCamelCase : Optional[Any] = F"""{safetensors.__version__} but is ignored because of PyTorch version too old.""" UpperCamelCase : Optional[Any] = "not installed" UpperCamelCase : Optional[Any] = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file UpperCamelCase : Tuple = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(A_ ): UpperCamelCase : Tuple = load_config_from_file(self._accelerate_config_file ).to_dict() UpperCamelCase : Optional[Any] = ( "\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(A_ , A_ ) else F"""\t{accelerate_config}""" ) UpperCamelCase : List[Any] = "not installed" UpperCamelCase : Optional[int] = "NA" if is_torch_available(): import torch UpperCamelCase : List[str] = torch.__version__ UpperCamelCase : List[Any] = torch.cuda.is_available() UpperCamelCase : Union[str, Any] = "not installed" UpperCamelCase : str = "NA" if is_tf_available(): import tensorflow as tf UpperCamelCase : List[str] = tf.__version__ try: # deprecated in v2.1 UpperCamelCase : Dict = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool UpperCamelCase : List[Any] = bool(tf.config.list_physical_devices("GPU" ) ) UpperCamelCase : Tuple = "not installed" UpperCamelCase : Optional[Any] = "not installed" UpperCamelCase : Tuple = "not installed" UpperCamelCase : Optional[int] = "NA" if is_flax_available(): import flax import jax import jaxlib UpperCamelCase : Union[str, Any] = flax.__version__ UpperCamelCase : Optional[Any] = jax.__version__ UpperCamelCase : List[str] = jaxlib.__version__ UpperCamelCase : int = jax.lib.xla_bridge.get_backend().platform UpperCamelCase : Optional[Any] = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": F"""{safetensors_version}""", "Accelerate version": F"""{accelerate_version}""", "Accelerate config": F"""{accelerate_config_str}""", "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""", "Tensorflow version (GPU?)": F"""{tf_version} 
({tf_cuda_available})""", "Flax version (CPU?/GPU?/TPU?)": F"""{flax_version} ({jax_backend})""", "Jax version": F"""{jax_version}""", "JaxLib version": F"""{jaxlib_version}""", "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(A_ ) ) return info @staticmethod def __UpperCamelCase( A_ ): '''simple docstring''' return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
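# Hedged usage note (the CLI wiring is assumed, not shown in this file): this
# class backs the `env` subcommand, which prints the version table above:
#   $ transformers-cli env
# Programmatically, a minimal invocation could shell out to that CLI:
import subprocess

subprocess.run(["transformers-cli", "env"], check=False)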
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __lowerCamelCase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") __lowerCamelCase : List[str] = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def A_ ( _lowerCAmelCase ) -> List[str]: UpperCamelCase : Optional[Any] = None # source code of `config_class` UpperCamelCase : Tuple = inspect.getsource(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = _re_checkpoint.findall(_lowerCAmelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): UpperCamelCase : Dict = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link UpperCamelCase : Any = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: UpperCamelCase : List[Any] = ckpt_name break return checkpoint def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue UpperCamelCase : Union[str, Any] = get_checkpoint_from_config_class(_lowerCAmelCase ) UpperCamelCase : Optional[int] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Any = "\n".join(sorted(_lowerCAmelCase ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
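# Hedged illustration: what the checkpoint regex above extracts from a
# config-class docstring line.
import re

_re_checkpoint_demo = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Instantiating [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_re_checkpoint_demo.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]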
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class A__ ( __snake_case ): def __init__( self , A_ , A_ ): '''simple docstring''' super().__init__() self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self , A_ = 1 , A_ = 100 , A_ = None , A_ = None , A_ = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCamelCase : str = self.unet.config.sample_size / self.unet.config.sample_rate UpperCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate UpperCamelCase : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCamelCase : Union[str, Any] = int(A_ ) if sample_size % down_scale_factor != 0: UpperCamelCase : List[str] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" " process." ) UpperCamelCase : Any = int(A_ ) UpperCamelCase : Union[str, Any] = next(iter(self.unet.parameters() ) ).dtype UpperCamelCase : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(A_ , A_ ) and len(A_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCamelCase : Optional[Any] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ ) # set step values self.scheduler.set_timesteps(A_ , device=audio.device ) UpperCamelCase : Optional[int] = self.scheduler.timesteps.to(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase : Dict = self.unet(A_ , A_ ).sample # 2. compute previous image: x_t -> t_t-1 UpperCamelCase : int = self.scheduler.step(A_ , A_ , A_ ).prev_sample UpperCamelCase : Optional[Any] = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCamelCase : Dict = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=A_ )
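# Hedged usage sketch (the checkpoint id is an assumption, not taken from
# this file): the pipeline above denoises pure noise into raw audio.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")  # assumed checkpoint
result = pipe(num_inference_steps=100, audio_length_in_s=4.0)
audio = result.audios[0]  # numpy array, (channels, samples)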
from __future__ import annotations from functools import lru_cache from math import ceil __lowerCamelCase : str = 100 __lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) __lowerCamelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowerCAmelCase ) -> set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase : set[int] = set() UpperCamelCase : int UpperCamelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowerCAmelCase = 5000 ) -> int | None: for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f"""{solution() = }""")
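# Hedged, self-contained restatement of the memoised recursion above (the
# flattened dump renames the def, so it cannot be called by name here):
from functools import lru_cache

@lru_cache(maxsize=None)
def prime_partition_products(n: int, primes=(2, 3, 5, 7)) -> frozenset:
    # products of all multisets of primes summing to n
    if n == 0:
        return frozenset({1})
    out = set()
    for p in primes:
        if p <= n:
            out.update(s * p for s in prime_partition_products(n - p, primes))
    return frozenset(out)

print(sorted(prime_partition_products(7)))  # [7, 10, 12]: 7, 2+5, 2+2+3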
__lowerCamelCase : List[str] = 8.3_1_4_4_5_9_8 def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> float: if temperature < 0: raise Exception("Temperature cannot be less than 0 K" ) if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example __lowerCamelCase : str = 300 __lowerCamelCase : List[Any] = 28 __lowerCamelCase : List[str] = rms_speed_of_molecule(temperature, molar_mass) print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Optional[int] = int(_lowerCAmelCase ) if decimal in (0, 1): # Exit cases for the recursion return str(_lowerCAmelCase ) UpperCamelCase , UpperCamelCase : Dict = divmod(_lowerCAmelCase , 2 ) return binary_recursive(_lowerCAmelCase ) + str(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = str(_lowerCAmelCase ).strip() if not number: raise ValueError("No input value was provided" ) UpperCamelCase : Optional[int] = "-" if number.startswith("-" ) else "" UpperCamelCase : Any = number.lstrip("-" ) if not number.isnumeric(): raise ValueError("Input value is not an integer" ) return F"""{negative}0b{binary_recursive(int(_lowerCAmelCase ) )}""" if __name__ == "__main__": from doctest import testmod testmod()
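# Hedged equivalence check (self-contained, since the flattened defs above
# share one name): the wrapper's output matches Python's builtin formatting.
def to_binary(number: int) -> str:
    sign = "-" if number < 0 else ""
    return f"{sign}0b{bin(abs(number))[2:]}"

print(to_binary(10))   # 0b1010
print(to_binary(-37))  # -0b100101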
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCamelCase : Dict = get_tests_dir("""fixtures/dummy-config.json""") class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = 0 def __UpperCamelCase( self ): '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = AutoConfig.from_pretrained("bert-base-uncased" ) self.assertIsInstance(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = AutoConfig.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = AutoConfig.for_model("roberta" ) self.assertIsInstance(A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. UpperCamelCase : Optional[int] = os.path.join(A_ , "fake-roberta" ) os.makedirs(A_ , exist_ok=A_ ) with open(os.path.join(A_ , "config.json" ) , "w" ) as f: f.write(json.dumps({} ) ) UpperCamelCase : Tuple = AutoConfig.from_pretrained(A_ ) self.assertEqual(type(A_ ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' try: AutoConfig.register("custom" , A_ ) # Wrong model type will raise an error with self.assertRaises(A_ ): AutoConfig.register("model" , A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): AutoConfig.register("bert" , A_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase : Tuple = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(A_ ) UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(A_ ) self.assertIsInstance(A_ , A_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __UpperCamelCase( self ): '''simple docstring''' with self.assertRaisesRegex( A_ , "bert-base is not a local folder and is not a valid model identifier" ): UpperCamelCase : Dict = AutoConfig.from_pretrained("bert-base" ) def __UpperCamelCase( self ): '''simple docstring''' with self.assertRaisesRegex( A_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(A_ , revision="aaaaaa" ) def __UpperCamelCase( self ): '''simple docstring''' with self.assertRaisesRegex( A_ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." 
, ): UpperCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" ) def __UpperCamelCase( self ): '''simple docstring''' with self.assertRaises(A_ ): UpperCamelCase : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) # If remote code is disabled, we can't load this config. with self.assertRaises(A_ ): UpperCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A_ ) UpperCamelCase : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A_ ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(A_ ) UpperCamelCase : Any = AutoConfig.from_pretrained(A_ , trust_remote_code=A_ ) self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" ) def __UpperCamelCase( self ): '''simple docstring''' class A__ ( __snake_case ): _UpperCAmelCase :str = 'new-model' try: AutoConfig.register("new-model" , A_ ) # If remote code is not set, the default is to use local UpperCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote code is disabled, we load the local one. UpperCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A_ ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote is enabled, we load from the Hub UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A_ ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Union[str, Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : str = type_vocab_size UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : Dict = initializer_range UpperCamelCase : int = num_labels UpperCamelCase : Optional[int] = scope UpperCamelCase : int = range_bbox def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase : Union[str, Any] = bbox[i, j, 3] UpperCamelCase : int = bbox[i, j, 1] UpperCamelCase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase : List[str] = bbox[i, j, 2] UpperCamelCase : Optional[int] = bbox[i, j, 0] UpperCamelCase : Optional[Any] = t UpperCamelCase : Dict = None if self.use_input_mask: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase : str = None if self.use_token_type_ids: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ ) UpperCamelCase : Any = model(A_ , bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = self.num_labels UpperCamelCase : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs UpperCamelCase : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' return True def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = LiltModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : Union[str, Any] = type 
self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ ) UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ ) UpperCamelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ ) UpperCamelCase : List[str] = torch.Size([1, 2, 768] ) UpperCamelCase : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , ) self.assertTrue(outputs.last_hidden_state.shape , A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __lowerCamelCase : Optional[int] = logging.get_logger(__name__) __lowerCamelCase : List[Any] = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'deberta-v2' def __init__( self , A_=12_8100 , A_=1536 , A_=24 , A_=24 , A_=6144 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0 , A_=0.02 , A_=1e-7 , A_=False , A_=-1 , A_=0 , A_=True , A_=None , A_=0 , A_="gelu" , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : Optional[Any] = hidden_size UpperCamelCase : Optional[int] = num_hidden_layers UpperCamelCase : Tuple = num_attention_heads UpperCamelCase : Optional[int] = intermediate_size UpperCamelCase : str = hidden_act UpperCamelCase : List[Any] = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : List[str] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : Dict = initializer_range UpperCamelCase : str = relative_attention UpperCamelCase : Union[str, Any] = max_relative_positions UpperCamelCase : List[Any] = pad_token_id UpperCamelCase : Tuple = position_biased_input # Backwards compatibility if type(A_ ) == str: UpperCamelCase : Any = [x.strip() for x in pos_att_type.lower().split("|" )] UpperCamelCase : Any = pos_att_type UpperCamelCase : Optional[Any] = vocab_size UpperCamelCase : Optional[int] = layer_norm_eps UpperCamelCase : List[str] = kwargs.get("pooler_hidden_size" , A_ ) UpperCamelCase : Any = pooler_dropout UpperCamelCase : Optional[Any] = pooler_hidden_act class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : str = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : List[Any] = {0: "batch", 1: "sequence"} if self._config.type_vocab_size > 0: return OrderedDict( [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] ) else: return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] ) @property def __UpperCamelCase( self ): '''simple docstring''' return 12 def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = -1 , A_ = False , A_ = None , A_ = 3 , A_ = 40 , A_ = 40 , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = super().generate_dummy_inputs(preprocessor=A_ , framework=A_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
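# Hedged usage sketch: exercising the backwards-compatibility branch above,
# which splits a pipe-separated `pos_att_type` string into a list.
from transformers import DebertaV2Config

config = DebertaV2Config(pos_att_type="p2c|c2p")
print(config.pos_att_type)  # ['p2c', 'c2p']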
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __lowerCamelCase : Union[str, Any] = pytest.mark.integration @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} ) return dset def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() UpperCamelCase : List[Any] = dset.map( lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ ) UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch UpperCamelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} UpperCamelCase : Optional[Any] = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=A_ ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[Any] = 1 UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ ) self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1] UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ ) self.assertRaises(A_ , index.search_batch , queries[0] ) UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores] UpperCamelCase : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(A_ ): UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dict = faiss.IndexFlat(5 ) UpperCamelCase : Union[str, 
Any] = FaissIndex(custom_index=A_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: index.save(tmp_file.name ) UpperCamelCase : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase : str = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : int = 1 UpperCamelCase , UpperCamelCase : Dict = index.search(A_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def A_ ( _lowerCAmelCase ) -> Optional[int]: import faiss UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) UpperCamelCase : List[Any] = "index.faiss" UpperCamelCase : List[str] = F"""mock://{index_name}""" index.save(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[int] = 1 UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = Elasticsearch() UpperCamelCase : Union[str, Any] = {"acknowledged": True} UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query UpperCamelCase : str = "foo" UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout UpperCamelCase : Dict = "foo" UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries UpperCamelCase : Dict = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ ) UpperCamelCase : str = [scores[0] for scores in total_scores] UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ ) # batched queries with timeout UpperCamelCase : int = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , 
request_timeout=30 ) UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores] UpperCamelCase : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ )
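# Hedged sketch of the FaissIndex round-trip exercised in the tests above
# (requires the optional `faiss` dependency to be installed):
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5, dtype=np.float32))
scores, indices = index.search(np.ones(5, dtype=np.float32))
print(indices[:1])  # id of the nearest stored vector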
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = "ylacombe/bark-small" UpperCamelCase : List[Any] = tempfile.mkdtemp() UpperCamelCase : Optional[int] = "en_speaker_1" UpperCamelCase : Tuple = "This is a test string" UpperCamelCase : Tuple = "speaker_embeddings_path.json" UpperCamelCase : List[str] = "speaker_embeddings" def __UpperCamelCase( self , **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **A_ ) def __UpperCamelCase( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.get_tokenizer() UpperCamelCase : Optional[Any] = BarkProcessor(tokenizer=A_ ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase : Tuple = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCamelCase : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCamelCase : List[Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCamelCase : int = 35 UpperCamelCase : str = 2 UpperCamelCase : Dict = 8 UpperCamelCase : Tuple = { "semantic_prompt": np.ones(A_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCamelCase : Any = processor(text=self.input_string , voice_preset=A_ ) UpperCamelCase : Dict = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCamelCase : str = os.path.join(self.tmpdirname , "file.npz" ) np.savez(A_ , **A_ ) UpperCamelCase : Union[str, Any] = processor(text=self.input_string , voice_preset=A_ ) UpperCamelCase : List[Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCamelCase : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.get_tokenizer() UpperCamelCase : str = BarkProcessor(tokenizer=A_ ) UpperCamelCase : str = processor(text=self.input_string ) UpperCamelCase : Tuple = tokenizer( self.input_string , padding="max_length" , max_length=256 , 
add_special_tokens=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
def A_ ( _lowerCAmelCase = 50 ) -> int: UpperCamelCase : List[Any] = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
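# Hedged cross-check (illustrative): the sum above is a Project Euler 116
# style count — for each single tile length t in {2, 3, 4} (one colour each),
# tilings of the row using at least one tile of that length, summed over the
# three colours. A direct recurrence for one colour should agree:
def one_colour_ways(length: int, tile: int) -> int:
    g = [1] * (length + 1)  # g[n] = tilings of a length-n row (possibly empty)
    for n in range(1, length + 1):
        g[n] = g[n - 1] + (g[n - tile] if n >= tile else 0)
    return g[length] - 1  # drop the tile-free row

print(sum(one_colour_ways(50, t) for t in (2, 3, 4)))  # should equal solution()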
52
1
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=None , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : Optional[Any] = batch_size UpperCamelCase : Optional[Any] = image_size UpperCamelCase : Any = patch_size UpperCamelCase : Any = num_channels UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : Union[str, Any] = use_labels UpperCamelCase : int = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : Dict = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : Optional[int] = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = type_sequence_label_size UpperCamelCase : Tuple = initializer_range UpperCamelCase : List[str] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase : Any = (image_size // patch_size) ** 2 UpperCamelCase : List[str] = num_patches + 1 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : str = None if self.use_labels: UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __UpperCamelCase( self ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = ViTMSNModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = self.type_sequence_label_size UpperCamelCase : Optional[int] = ViTMSNForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , labels=A_ ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 
self.type_sequence_label_size) ) # test greyscale images UpperCamelCase : Tuple = 1 UpperCamelCase : List[str] = ViTMSNForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : str = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = config_and_inputs UpperCamelCase : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () _UpperCAmelCase :Optional[Any] = ( {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase :int = False _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Any = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = ViTMSNModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Any = model_class(A_ ) UpperCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Optional[int] = [*signature.parameters.keys()] UpperCamelCase : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Union[str, Any] = ViTMSNModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def A_ ( ) -> Tuple: UpperCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def __UpperCamelCase( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def __UpperCamelCase( self ): 
'''simple docstring''' torch.manual_seed(2 ) UpperCamelCase : Union[str, Any] = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(A_ ) UpperCamelCase : Any = self.default_image_processor UpperCamelCase : List[str] = prepare_img() UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="pt" ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(**A_ ) # verify the logits UpperCamelCase : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase : Any = torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
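# Condensed inference sketch matching the integration test above; the
# checkpoint and fixture path are taken from the test itself.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(logits.shape)  # torch.Size([1, 1000])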
52
def A_ ( _lowerCAmelCase ) -> str:
    UpperCamelCase : List[Any] = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def A_ ( _lowerCAmelCase ) -> dict[str, str]:
    UpperCamelCase : Optional[Any] = [chr(i + 65 ) for i in range(26 )]

    # Remove duplicate characters from key
    UpperCamelCase : Tuple = remove_duplicates(key.upper() )
    UpperCamelCase : int = len(_lowerCAmelCase )

    # First fill cipher with key characters
    UpperCamelCase : int = {alphabet[i]: char for i, char in enumerate(_lowerCAmelCase )}

    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(_lowerCAmelCase ) , 26 ):
        UpperCamelCase : Optional[Any] = alphabet[i - offset]

        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            UpperCamelCase : List[str] = alphabet[i - offset]

        UpperCamelCase : List[Any] = char

    return cipher_alphabet


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
    return "".join(cipher_map.get(_lowerCAmelCase , _lowerCAmelCase ) for ch in message.upper() )


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
    UpperCamelCase : Union[str, Any] = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(_lowerCAmelCase , _lowerCAmelCase ) for ch in message.upper() )


def A_ ( ) -> None:
    UpperCamelCase : int = input("Enter message to encode or decode: " ).strip()
    UpperCamelCase : str = input("Enter keyword: " ).strip()
    UpperCamelCase : Union[str, Any] = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        UpperCamelCase : List[str] = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    UpperCamelCase : str = create_cipher_map(_lowerCAmelCase )
    print(func(_lowerCAmelCase , _lowerCAmelCase ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
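# Round-trip sketch for the keyword cipher above, written against the
# original (pre-obfuscation) names create_cipher_map / encipher / decipher
# recovered from the call sites in main(); the expected strings come from
# the upstream doctests.
# cipher_map = create_cipher_map("Goodbye!!")
# encipher("Hello World!!", cipher_map)   # 'CYJJM VMQJB!!'
# decipher("CYJJM VMQJB!!", cipher_map)   # 'HELLO WORLD!!'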
52
1
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def A_ ( ) -> List[Any]:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(_lowerCAmelCase ):
            requests.request("GET" , "https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )


@pytest.mark.integration
def A_ ( ) -> Tuple:
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , "https://huggingface.co" )


def A_ ( ) -> Optional[int]:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(_lowerCAmelCase ):
            http_head("https://huggingface.co" )
52
from sklearn.metrics import fa_score import datasets __lowerCamelCase : List[Any] = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __lowerCamelCase : List[Any] = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ __lowerCamelCase : str = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ): '''simple docstring''' UpperCamelCase : List[str] = fa_score( A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ ) return {"f1": float(A_ ) if score.size == 1 else score}
52
1
import math


def A_ ( _lowerCAmelCase = 100 ) -> int:
    UpperCamelCase : List[Any] = sum(i * i for i in range(1 , n + 1 ) )
    UpperCamelCase : List[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
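# Quick numeric check of the sum-square difference above, restated with
# plain names since the flattened file renames its locals (Project Euler 6's
# worked example for n = 10):
n = 10
square_of_sum = sum(range(1, n + 1)) ** 2             # 55**2 = 3025
sum_of_squares = sum(i * i for i in range(1, n + 1))  # 385
assert square_of_sum - sum_of_squares == 2640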
52
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], 
"vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
52
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: for attribute in key.split("." ): UpperCamelCase : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: UpperCamelCase : str = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: UpperCamelCase : str = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCamelCase : Dict = value elif weight_type == "weight_g": UpperCamelCase : Optional[int] = value elif weight_type == "weight_v": UpperCamelCase : Optional[int] = value elif weight_type == "bias": UpperCamelCase : List[str] = value else: UpperCamelCase : Dict = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: UpperCamelCase : Optional[Any] = [] UpperCamelCase : Union[str, Any] = fairseq_model.state_dict() UpperCamelCase : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase : List[Any] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) UpperCamelCase : Dict = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase : Any = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: UpperCamelCase : Any = True if "*" in mapped_key: UpperCamelCase : Tuple = name.split(_lowerCAmelCase )[0].split("." 
)[-2] UpperCamelCase : Optional[int] = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: UpperCamelCase : int = "weight_g" elif "weight_v" in name: UpperCamelCase : Optional[Any] = "weight_v" elif "weight" in name: UpperCamelCase : int = "weight" elif "bias" in name: UpperCamelCase : str = "bias" else: UpperCamelCase : Any = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: UpperCamelCase : Any = full_name.split("conv_layers." )[-1] UpperCamelCase : Tuple = name.split("." ) UpperCamelCase : Dict = int(items[0] ) UpperCamelCase : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCamelCase : Tuple = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCamelCase : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) UpperCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCamelCase : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : List[Any] = SEWConfig() if is_finetuned: UpperCamelCase : int = model.wav_encoder.wav_model.cfg else: UpperCamelCase : Dict = model.cfg UpperCamelCase : int = fs_config.conv_bias UpperCamelCase : Dict = eval(fs_config.conv_feature_layers ) UpperCamelCase : Union[str, Any] = [x[0] for x in conv_layers] UpperCamelCase : Union[str, Any] = [x[1] for x in conv_layers] UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers] UpperCamelCase : int = "gelu" UpperCamelCase : List[Any] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" UpperCamelCase : Union[str, Any] = 0.0 UpperCamelCase : List[str] = fs_config.activation_fn.name UpperCamelCase : Any = fs_config.encoder_embed_dim UpperCamelCase : Optional[int] = 0.02 UpperCamelCase : str = fs_config.encoder_ffn_embed_dim UpperCamelCase : List[str] = 1e-5 UpperCamelCase : Optional[Any] = fs_config.encoder_layerdrop UpperCamelCase : Any = fs_config.encoder_attention_heads UpperCamelCase : List[Any] = fs_config.conv_pos_groups UpperCamelCase : Any = fs_config.conv_pos UpperCamelCase : Dict = len(_lowerCAmelCase ) UpperCamelCase : str = fs_config.encoder_layers UpperCamelCase : List[str] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: UpperCamelCase : int = model.cfg UpperCamelCase : Optional[Any] = fs_config.final_dropout UpperCamelCase : Tuple = fs_config.layerdrop UpperCamelCase : Union[str, Any] = fs_config.activation_dropout UpperCamelCase : Optional[int] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 UpperCamelCase : List[Any] = fs_config.attention_dropout UpperCamelCase : Optional[int] = fs_config.dropout_input UpperCamelCase : Optional[Any] = fs_config.dropout UpperCamelCase : Union[str, Any] = fs_config.mask_channel_length UpperCamelCase : int = fs_config.mask_channel_prob UpperCamelCase : Union[str, Any] = fs_config.mask_length UpperCamelCase : Union[str, Any] = fs_config.mask_prob UpperCamelCase : Union[str, Any] = "Wav2Vec2FeatureExtractor" UpperCamelCase : Optional[int] = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True ) -> Optional[Any]: if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: UpperCamelCase : Dict = SEWConfig.from_pretrained(_lowerCAmelCase ) else: UpperCamelCase : List[str] = convert_config(model[0] , _lowerCAmelCase ) UpperCamelCase : int = model[0].eval() UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" 
else False UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) if is_finetuned: if dict_path: UpperCamelCase : int = Dictionary.load(_lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase : List[str] = target_dict.pad_index UpperCamelCase : List[str] = target_dict.bos_index UpperCamelCase : List[Any] = target_dict.pad_index UpperCamelCase : Optional[int] = target_dict.bos_index UpperCamelCase : List[Any] = target_dict.eos_index UpperCamelCase : Optional[int] = len(target_dict.symbols ) UpperCamelCase : Dict = os.path.join(_lowerCAmelCase , "vocab.json" ) if not os.path.isdir(_lowerCAmelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase ) ) return os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , _lowerCAmelCase ) UpperCamelCase : str = WavaVecaCTCTokenizer( _lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , ) UpperCamelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) UpperCamelCase : Dict = SEWForCTC(_lowerCAmelCase ) else: UpperCamelCase : Optional[int] = SEWModel(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) hf_model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __lowerCamelCase : Tuple = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
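# Example invocation of the conversion script above (the script filename and
# paths are placeholders; the flag names come directly from the argparse
# definitions):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned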
52
class A__ :
    def __init__( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = set_counts
        UpperCamelCase : int = max(A_ )
        UpperCamelCase : Optional[Any] = len(A_ )
        UpperCamelCase : Union[str, Any] = [1] * num_sets
        UpperCamelCase : Union[str, Any] = list(range(A_ ) )

    def __UpperCamelCase( self , A_ , A_ ):
        '''simple docstring'''
        UpperCamelCase : Any = self.get_parent(A_ )
        UpperCamelCase : Optional[int] = self.get_parent(A_ )

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            UpperCamelCase : int = 0
            UpperCamelCase : Dict = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            UpperCamelCase : Optional[int] = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            UpperCamelCase : Any = 0
            UpperCamelCase : Optional[int] = src_parent
            UpperCamelCase : int = self.set_counts[src_parent]

        UpperCamelCase : Any = max(self.max_set , A_ )
        return True

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        if self.parents[disj_set] == disj_set:
            return disj_set
        UpperCamelCase : Optional[int] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
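# Usage sketch for the union-find-with-set-counts class above, written
# against the recovered attribute and method names (set_counts / ranks /
# parents, merge / get_parent); 'DisjointSet' stands in for the obfuscated
# class name.
# ds = DisjointSet([1, 1, 1])           # three singleton sets
# ds.merge(0, 1)                        # union by rank -> True
# ds.get_parent(0) == ds.get_parent(1)  # True after the merge
# ds.max_set                            # size of the largest set -> 2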
52
1
import numpy

# List of input, output pairs
__lowerCamelCase : Any = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
__lowerCamelCase : Optional[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
__lowerCamelCase : Tuple = [2, 4, 1, 5]
__lowerCamelCase : List[str] = len(train_data)
__lowerCamelCase : Union[str, Any] = 0.0_0_9


def A_ ( _lowerCAmelCase , _lowerCAmelCase="train" ) -> Tuple:
    return calculate_hypothesis_value(_lowerCAmelCase , _lowerCAmelCase ) - output(
        _lowerCAmelCase , _lowerCAmelCase
    )


def A_ ( _lowerCAmelCase ) -> str:
    UpperCamelCase : Dict = 0
    for i in range(len(_lowerCAmelCase ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def A_ ( _lowerCAmelCase , _lowerCAmelCase=m ) -> Any:
    UpperCamelCase : int = 0
    for i in range(_lowerCAmelCase ):
        if index == -1:
            summation_value += _error(_lowerCAmelCase )
        else:
            summation_value += _error(_lowerCAmelCase ) * train_data[i][0][index]
    return summation_value


def A_ ( _lowerCAmelCase ) -> Any:
    UpperCamelCase : str = summation_of_cost_derivative(_lowerCAmelCase , _lowerCAmelCase ) / m
    return cost_derivative_value


def A_ ( ) -> Union[str, Any]:
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    UpperCamelCase : int = 0.000_002
    UpperCamelCase : Optional[int] = 0
    UpperCamelCase : List[str] = 0
    while True:
        j += 1
        UpperCamelCase : List[Any] = [0, 0, 0, 0]
        for i in range(0 , len(_lowerCAmelCase ) ):
            UpperCamelCase : List[Any] = get_cost_derivative(i - 1 )
            UpperCamelCase : Union[str, Any] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            _lowerCAmelCase ,
            _lowerCAmelCase ,
            atol=_lowerCAmelCase ,
            rtol=_lowerCAmelCase ,
        ):
            break
        UpperCamelCase : Union[str, Any] = temp_parameter_vector
    print(("Number of iterations:", j) )


def A_ ( ) -> Tuple:
    for i in range(len(_lowerCAmelCase ) ):
        print(("Actual output value:", output(_lowerCAmelCase , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(_lowerCAmelCase , "test" )) )


if __name__ == "__main__":
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
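# The de-obfuscated update rule implemented above, shown compactly: for the
# linear hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3 the parameters move
# against the averaged error gradient. A minimal, self-contained single
# update step on the same training data:
import numpy as np

train_x = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
train_y = np.array([15, 25, 41, 8, 41], dtype=float)
params = np.array([2, 4, 1, 5], dtype=float)  # [bias, w1, w2, w3], as in the file
lr = 0.009

errors = params[0] + train_x @ params[1:] - train_y
grad = np.concatenate(([errors.mean()], (errors[:, None] * train_x).mean(axis=0)))
params -= lr * grad
print(params)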
52
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, 
FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
1
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __lowerCamelCase : Dict = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class A__ ( __snake_case ): def __init__( self , A_ = 101 ): '''simple docstring''' UpperCamelCase : Dict = length def __len__( self ): '''simple docstring''' return self.length def __getitem__( self , A_ ): '''simple docstring''' return i class A__ : def __call__( self , A_ ): '''simple docstring''' return {"input_ids": torch.tensor(A_ ), "labels": torch.tensor(A_ )} class A__ ( nn.Module ): def __init__( self ): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. UpperCamelCase : Optional[int] = nn.Linear(120 , 80 ) def __UpperCamelCase( self , A_ , A_=None ): '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class A__ ( __snake_case ): @require_torch_neuroncore def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = F"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() UpperCamelCase : List[str] = F"""--output_dir {output_dir}""".split() UpperCamelCase : Optional[int] = ["torchrun"] + distributed_args + args execute_subprocess_async(A_ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class A__ ( __snake_case ): @require_torch_multi_gpu def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = F"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() UpperCamelCase : List[str] = F"""--output_dir {output_dir}""".split() UpperCamelCase : Optional[Any] = ["torchrun"] + distributed_args + args execute_subprocess_async(A_ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __lowerCamelCase : List[str] = HfArgumentParser((TrainingArguments,)) __lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()[0] logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __lowerCamelCase : str = DummyDataset(dataset_length) def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : Optional[Any] = list(range(len(_lowerCAmelCase ) ) ) UpperCamelCase : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} __lowerCamelCase : str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __lowerCamelCase : Any = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __lowerCamelCase : List[Any] = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __lowerCamelCase : Optional[int] = 2 __lowerCamelCase : Tuple = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __lowerCamelCase : Tuple = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __lowerCamelCase : Dict = None
52
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


__lowerCamelCase : str = logging.get_logger(__name__)

__lowerCamelCase : str = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}


class A__ ( __snake_case , __snake_case ):
    _UpperCAmelCase :Optional[int] = 'convnextv2'

    def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ):
        '''simple docstring'''
        super().__init__(**A_ )

        UpperCamelCase : Dict = num_channels
        UpperCamelCase : Union[str, Any] = patch_size
        UpperCamelCase : Union[str, Any] = num_stages
        UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        UpperCamelCase : List[str] = [3, 3, 9, 3] if depths is None else depths
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : Union[str, Any] = initializer_range
        UpperCamelCase : Tuple = layer_norm_eps
        UpperCamelCase : str = drop_path_rate
        UpperCamelCase : List[str] = image_size
        UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        UpperCamelCase , UpperCamelCase : str = get_aligned_output_features_output_indices(
            out_features=A_ , out_indices=A_ , stage_names=self.stage_names
        )
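# Minimal usage sketch for the configuration class above (it ships as
# ConvNextV2Config in transformers; the defaults mirror the __init__
# signature shown):
from transformers import ConvNextV2Config

config = ConvNextV2Config()  # defaults: depths [3, 3, 9, 3], hidden sizes [96, 192, 384, 768]
print(config.model_type)     # "convnextv2"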
52
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


__lowerCamelCase : str = logging.get_logger(__name__)

__lowerCamelCase : str = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}


class A__ ( __snake_case , __snake_case ):
    _UpperCAmelCase :Optional[int] = 'convnextv2'

    def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ):
        '''simple docstring'''
        super().__init__(**A_ )

        UpperCamelCase : Dict = num_channels
        UpperCamelCase : Union[str, Any] = patch_size
        UpperCamelCase : Union[str, Any] = num_stages
        UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        UpperCamelCase : List[str] = [3, 3, 9, 3] if depths is None else depths
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : Union[str, Any] = initializer_range
        UpperCamelCase : Tuple = layer_norm_eps
        UpperCamelCase : str = drop_path_rate
        UpperCamelCase : List[str] = image_size
        UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        UpperCamelCase , UpperCamelCase : str = get_aligned_output_features_output_indices(
            out_features=A_ , out_indices=A_ , stage_names=self.stage_names
        )
52
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def A_ ( ) -> List[Any]:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(_lowerCAmelCase ):
            requests.request("GET" , "https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )


@pytest.mark.integration
def A_ ( ) -> Tuple:
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , "https://huggingface.co" )


def A_ ( ) -> Optional[int]:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(_lowerCAmelCase ):
            http_head("https://huggingface.co" )
52
1
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)


class A__ ( __snake_case ):
    def __init__( self , *A_ , **A_ ):
        '''simple docstring'''
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , A_ , )
        super().__init__(*A_ , **A_ )
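# The deprecation above keeps GLPNFeatureExtractor as a thin alias of the
# new image processor; migrating is a one-line change:
from transformers import GLPNImageProcessor

image_processor = GLPNImageProcessor()  # use instead of GLPNFeatureExtractor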
52
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : int = {"""vocab_file""": """sentencepiece.bpe.model"""} __lowerCamelCase : Any = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", } } __lowerCamelCase : Optional[Any] = { """camembert-base""": 512, } __lowerCamelCase : Union[str, Any] = """▁""" class A__ ( __snake_case ): _UpperCAmelCase :List[str] = VOCAB_FILES_NAMES _UpperCAmelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :List[str] = ['input_ids', 'attention_mask'] def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Dict = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) UpperCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) UpperCamelCase : str = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> UpperCamelCase : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} UpperCamelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids ) UpperCamelCase : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) UpperCamelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase : Any = [self.cls_token_id] UpperCamelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCamelCase( self , A_ , A_ = None , A_ = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Dict = [self.sep_token_id] UpperCamelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCamelCase( self ): '''simple docstring''' return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.sp_model.encode(A_ , out_type=A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return 
self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A_ ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = [] UpperCamelCase : Optional[int] = "" UpperCamelCase : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token UpperCamelCase : List[Any] = True UpperCamelCase : Union[str, Any] = [] else: current_sub_tokens.append(A_ ) UpperCamelCase : Optional[Any] = False out_string += self.sp_model.decode(A_ ) return out_string.strip() def __getstate__( self ): '''simple docstring''' UpperCamelCase : Any = self.__dict__.copy() UpperCamelCase : Any = None return state def __setstate__( self , A_ ): '''simple docstring''' UpperCamelCase : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCamelCase : Optional[Any] = {} UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if not os.path.isdir(A_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase : Optional[int] = os.path.join( A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , "wb" ) as fi: UpperCamelCase : str = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
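# Usage sketch for the tokenizer implemented above (CamembertTokenizer in
# transformers), showing the <s> ... </s> layout produced by
# build_inputs_with_special_tokens:
from transformers import CamembertTokenizer

tok = CamembertTokenizer.from_pretrained("camembert-base")
ids = tok("J'aime le camembert !")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # starts with '<s>' and ends with '</s>'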
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, 
Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCamelCase : Tuple = 1 UpperCamelCase : Tuple = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = ssent.split(" " ) UpperCamelCase : Dict = csent.split(" " ) UpperCamelCase : str = [] UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : str = [] UpperCamelCase : str = [] UpperCamelCase : Dict = [] UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [] UpperCamelCase : Tuple = [] for rsent in rsents: UpperCamelCase : List[Any] = rsent.split(" " ) UpperCamelCase : List[str] = [] UpperCamelCase : int = [] UpperCamelCase : Tuple = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCamelCase : str = 
sum([delascore, delascore, delascore, delascore] ) / 4 UpperCamelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCamelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: UpperCamelCase : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCamelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: UpperCamelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": UpperCamelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase ) elif tokenizer == "penn": UpperCamelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase ) else: UpperCamelCase : Union[str, Any] = sentence if not return_str: UpperCamelCase : Tuple = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError("Sources length must match predictions and references lengths." 
) UpperCamelCase : Optional[Any] = 0 for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] ) UpperCamelCase : Optional[int] = sari_score / len(_lowerCAmelCase ) return 100 * sari_score def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]: UpperCamelCase : Optional[Any] = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] UpperCamelCase : Tuple = sacrebleu.corpus_bleu( _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = {} result.update({"sari": compute_sari(sources=A_ , predictions=A_ , references=A_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=A_ , references=A_ )} ) result.update({"exact": compute_em(predictions=A_ , references=A_ )} ) return result
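# --- Hedged usage sketch mirroring the docstring example above. Assumes the
# legacy `datasets.load_metric` API (datasets < 3.0) is still available.
import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # e.g. {'sari': 21.80..., 'sacrebleu': 14.53..., 'exact': 0.0}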
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow __lowerCamelCase : int = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ """text-classification""", """language-modeling""", """summarization""", """token-classification""", """question-answering""", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) __lowerCamelCase : str = logging.getLogger() def A_ ( ) -> List[str]: UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCamelCase : Union[str, Any] = parser.parse_args() return args.f def A_ ( _lowerCAmelCase , _lowerCAmelCase="eval" ) -> Optional[int]: UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , F"""{split}_results.json""" ) if os.path.exists(_lowerCAmelCase ): with open(_lowerCAmelCase , "r" ) as f: return json.load(_lowerCAmelCase ) raise ValueError(F"""can't find {path}""" ) __lowerCamelCase : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.get_auto_remove_tmp_dir() UpperCamelCase : Optional[Any] = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A_ , "argv" , A_ ): run_flax_glue.main() UpperCamelCase : int = get_results(A_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.get_auto_remove_tmp_dir() UpperCamelCase : Optional[Any] = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , "argv" , A_ ): run_clm_flax.main() UpperCamelCase : Optional[Any] = get_results(A_ ) self.assertLess(result["eval_perplexity"] , 100 ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.get_auto_remove_tmp_dir() UpperCamelCase : Optional[Any] = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A_ , "argv" , A_ ): run_summarization_flax.main() UpperCamelCase : Optional[Any] = get_results(A_ , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 10 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) 
self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.get_auto_remove_tmp_dir() UpperCamelCase : Tuple = F""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A_ , "argv" , A_ ): run_mlm_flax.main() UpperCamelCase : Dict = get_results(A_ ) self.assertLess(result["eval_perplexity"] , 42 ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() UpperCamelCase : Any = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A_ , "argv" , A_ ): run_ta_mlm_flax.main() UpperCamelCase : Tuple = get_results(A_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2 UpperCamelCase : int = self.get_auto_remove_tmp_dir() UpperCamelCase : Dict = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A_ , "argv" , A_ ): run_flax_ner.main() UpperCamelCase : Tuple = get_results(A_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.get_auto_remove_tmp_dir() UpperCamelCase : str = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A_ , "argv" , A_ ): run_qa.main() UpperCamelCase : Optional[int] = get_results(A_ ) self.assertGreaterEqual(result["eval_f1"] , 30 ) self.assertGreaterEqual(result["eval_exact"] , 30 )
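# --- Minimal, self-contained sketch of the sys.argv-patching pattern the
# tests above use to drive the example scripts' main() functions. The parser
# and flag here are hypothetical stand-ins for the real run_*_flax scripts.
import argparse
import sys
from unittest.mock import patch


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=0)
    args = parser.parse_args()
    print(f"seed={args.seed}")


testargs = "run_dummy.py --seed 42".split()
with patch.object(sys, "argv", testargs):
    main()  # argparse reads the patched argv and prints seed=42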
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : str = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'roberta' def __init__( self , A_=5_0265 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : Optional[int] = vocab_size UpperCamelCase : Dict = hidden_size UpperCamelCase : str = num_hidden_layers UpperCamelCase : Any = num_attention_heads UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Tuple = hidden_dropout_prob UpperCamelCase : Tuple = attention_probs_dropout_prob UpperCamelCase : Tuple = max_position_embeddings UpperCamelCase : Any = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : str = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : Any = use_cache UpperCamelCase : Union[str, Any] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
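# --- Illustrative sketch: instantiating the config above. The reduced sizes
# are arbitrary assumptions for a small test model, not a recipe.
from transformers import RobertaConfig

config = RobertaConfig(num_hidden_layers=6, hidden_size=384, num_attention_heads=6)
print(config.model_type)               # "roberta"
print(config.max_position_embeddings)  # 512 by default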
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase : Tuple = 16 __lowerCamelCase : Optional[int] = 32 def A_ ( _lowerCAmelCase , _lowerCAmelCase = 16 ) -> List[str]: UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCamelCase : Tuple = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase : Union[str, Any] = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase : Any = 16 elif accelerator.mixed_precision != "no": UpperCamelCase : Union[str, Any] = 8 else: UpperCamelCase : List[str] = None return tokenizer.pad( _lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
UpperCamelCase : List[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) UpperCamelCase : int = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCamelCase : Any = mocked_dataloaders # noqa: F811 def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1": UpperCamelCase : str = 2 # New Code # UpperCamelCase : Any = int(args.gradient_accumulation_steps ) UpperCamelCase : int = int(args.local_sgd_steps ) # Initialize accelerator UpperCamelCase : Union[str, Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCAmelCase ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase : Union[str, Any] = config["lr"] UpperCamelCase : str = int(config["num_epochs"] ) UpperCamelCase : List[str] = int(config["seed"] ) UpperCamelCase : Any = int(config["batch_size"] ) UpperCamelCase : Optional[int] = evaluate.load("glue" , "mrpc" ) set_seed(_lowerCAmelCase ) UpperCamelCase , UpperCamelCase : Any = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase : List[str] = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase : Union[str, Any] = AdamW(params=model.parameters() , lr=_lowerCAmelCase ) # Instantiate scheduler UpperCamelCase : Optional[Any] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Now we train the model for epoch in range(_lowerCAmelCase ): model.train() with LocalSGD( accelerator=_lowerCAmelCase , model=_lowerCAmelCase , local_sgd_steps=_lowerCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_lowerCAmelCase ): UpperCamelCase : Union[str, Any] = model(**_lowerCAmelCase ) UpperCamelCase : List[Any] = output.loss accelerator.backward(_lowerCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase : Union[str, Any] = model(**_lowerCAmelCase ) UpperCamelCase : Optional[Any] = outputs.logits.argmax(dim=-1 ) UpperCamelCase , UpperCamelCase : str = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCAmelCase , references=_lowerCAmelCase , ) UpperCamelCase : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _lowerCAmelCase ) def A_ ( ) -> int: UpperCamelCase : Optional[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_lowerCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument( "--local_sgd_steps" , type=_lowerCAmelCase , default=8 , help="Number of local SGD steps or None to disable local SGD" ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) UpperCamelCase : Union[str, Any] = parser.parse_args() UpperCamelCase : Union[str, Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
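# --- Minimal sketch of the LocalSGD wrapper pattern used above, on a toy
# linear model with random data. Assumes `accelerate` is installed; launched
# on a single process this reduces to ordinary SGD.
import torch
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for _ in range(16):
        x = torch.randn(8, 4, device=accelerator.device)
        loss = model(x).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # averages parameters across workers every 8 steps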
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class A__ ( __snake_case ): def __init__( self , A_ , A_ ): '''simple docstring''' super().__init__() self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self , A_ = 1 , A_ = 100 , A_ = None , A_ = None , A_ = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCamelCase : str = self.unet.config.sample_size / self.unet.config.sample_rate UpperCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate UpperCamelCase : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCamelCase : Union[str, Any] = int(A_ ) if sample_size % down_scale_factor != 0: UpperCamelCase : List[str] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" " process." ) UpperCamelCase : Any = int(A_ ) UpperCamelCase : Union[str, Any] = next(iter(self.unet.parameters() ) ).dtype UpperCamelCase : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(A_ , A_ ) and len(A_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCamelCase : Optional[Any] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ ) # set step values self.scheduler.set_timesteps(A_ , device=audio.device ) UpperCamelCase : Optional[int] = self.scheduler.timesteps.to(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase : Dict = self.unet(A_ , A_ ).sample # 2. compute previous image: x_t -> t_t-1 UpperCamelCase : int = self.scheduler.step(A_ , A_ , A_ ).prev_sample UpperCamelCase : Optional[Any] = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCamelCase : Dict = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=A_ )
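# --- Hedged usage sketch: the pipeline above mirrors diffusers'
# DanceDiffusionPipeline. "harmonai/maestro-150k" is assumed to be a
# compatible public checkpoint; substitute any other. Generation is slow on CPU.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
audio = output.audios[0]  # numpy array, shape (channels, samples)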
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=True , A_=False , A_=False , A_=False , A_=2 , A_=99 , A_=0 , A_=32 , A_=5 , A_=4 , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=2 , A_=4 , A_="last" , A_=True , A_=None , A_=0 , ): '''simple docstring''' UpperCamelCase : Any = parent UpperCamelCase : str = batch_size UpperCamelCase : Tuple = seq_length UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : List[Any] = use_input_lengths UpperCamelCase : Optional[Any] = use_token_type_ids UpperCamelCase : Optional[int] = use_labels UpperCamelCase : Optional[int] = gelu_activation UpperCamelCase : Optional[Any] = sinusoidal_embeddings UpperCamelCase : Tuple = causal UpperCamelCase : List[Any] = asm UpperCamelCase : List[str] = n_langs UpperCamelCase : Any = vocab_size UpperCamelCase : List[str] = n_special UpperCamelCase : Optional[Any] = hidden_size UpperCamelCase : Union[str, Any] = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[int] = hidden_dropout_prob UpperCamelCase : Optional[Any] = attention_probs_dropout_prob UpperCamelCase : int = max_position_embeddings UpperCamelCase : Union[str, Any] = type_sequence_label_size UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : Any = num_labels UpperCamelCase : Dict = num_choices UpperCamelCase : Union[str, Any] = summary_type UpperCamelCase : int = use_proj UpperCamelCase : List[Any] = scope UpperCamelCase : List[Any] = bos_token_id def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : List[str] = None if self.use_input_lengths: UpperCamelCase : Optional[int] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase : Optional[int] = None if self.use_token_type_ids: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase : Dict = None UpperCamelCase : int = None UpperCamelCase : Union[str, Any] = None if self.use_labels: UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , 2 ).float() UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Optional[int] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __UpperCamelCase( self ): '''simple docstring''' return XLMConfig( 
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[str] = XLMModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = model(A_ , lengths=A_ , langs=A_ ) UpperCamelCase : Optional[int] = model(A_ , langs=A_ ) UpperCamelCase : Any = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[Any] = model(A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[str] = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ ) UpperCamelCase : List[Any] = model(A_ , start_positions=A_ , end_positions=A_ ) UpperCamelCase : Optional[Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Optional[Any] = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Tuple = model(A_ ) UpperCamelCase : int = model( A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , ) UpperCamelCase : Dict = model( A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , ) ((UpperCamelCase) , ) : List[Any] = result_with_labels.to_tuple() UpperCamelCase : Tuple = model(A_ , start_positions=A_ , end_positions=A_ ) ((UpperCamelCase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Any = model(A_ ) UpperCamelCase : str = model(A_ , labels=A_ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : List[Any] = self.num_labels UpperCamelCase : List[str] = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Optional[int] = self.num_choices UpperCamelCase : Tuple = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : Optional[int] = model( A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :int = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCAmelCase :Union[str, Any] = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __UpperCamelCase( self , A_ , A_ , A_=False ): '''simple docstring''' UpperCamelCase : Union[str, Any] = super()._prepare_for_class(A_ , A_ , return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": UpperCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A_ ) UpperCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A_ ) return inputs_dict def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = XLMModelTester(self ) UpperCamelCase : Any = ConfigTester(self , config_class=A_ , emb_dim=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=False , A_=1 ): '''simple docstring''' self.assertIsInstance(A_ , A_ ) self.assertListEqual( [isinstance(A_ , A_ ) for iter_attentions in attentions] , [True] * len(A_ ) ) self.assertEqual(len(A_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token UpperCamelCase : str = min_length + idx + 1 UpperCamelCase : int = min_length + idx + 1 UpperCamelCase : str = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A_ ) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=False , A_=1 ): '''simple docstring''' self.assertIsInstance(A_ , A_ ) self.assertListEqual( [isinstance(A_ , A_ ) for iter_hidden_states in hidden_states] , [True] * len(A_ ) , ) self.assertEqual(len(A_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token UpperCamelCase : Any = min_length + idx + 1 UpperCamelCase : Optional[int] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A_ ) 
, ) pass @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Optional[Any] = XLMModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(A_ ) UpperCamelCase : Dict = torch.tensor([[14, 447]] , dtype=torch.long , device=A_ ) # the president UpperCamelCase : int = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference UpperCamelCase : Optional[int] = model.generate(A_ , do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A_ )
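# --- Hedged inference sketch for the model family tested above, using the
# same public "xlm-mlm-en-2048" checkpoint as the slow test.
import torch
from transformers import XLMModel, XLMTokenizer

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMModel.from_pretrained("xlm-mlm-en-2048")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 2048])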
import functools


def min_edit_distance(word1: str, word2: str) -> int:
    """Levenshtein distance between word1 and word2 via memoized recursion.

    >>> min_edit_distance("intention", "execution")
    5
    """
    len1, len2 = len(word1), len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # first word exhausted - insert the rest of the second word
        if index1 >= len1:
            return len2 - index2
        # second word exhausted - delete the rest of the first word
        if index2 >= len2:
            return len1 - index1
        diff = int(word1[index1] != word2[index2])  # 0 when current letters match
        return min(
            1 + min_distance(index1 + 1, index2),  # delete from word1
            1 + min_distance(index1, index2 + 1),  # insert into word1
            diff + min_distance(index1 + 1, index2 + 1),  # substitute or keep
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
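# --- Worked examples for min_edit_distance above (renamed here from the
# original obfuscated A_): classic pairs with known answers.
if __name__ == "__main__":
    assert min_edit_distance("kitten", "sitting") == 3  # 2 substitutions + 1 insertion
    assert min_edit_distance("", "abc") == 3            # pure insertions
    print("edit-distance examples passed")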
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
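# --- Hedged sketch of what the _LazyModule wiring above buys: the submodule
# import is deferred until the first attribute access resolves it.
from transformers.models import gpt_neo

config = gpt_neo.GPTNeoConfig()  # real import happens lazily at this access
print(config.model_type)  # "gpt_neo"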
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
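# --- Hedged sketch of the feature extractor under test, on synthetic audio.
# One second of random 16 kHz samples stands in for real speech.
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)  # (1, 1024, 128), as the slow test asserts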
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
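# --- Hedged demonstration of the shim: instantiating through the public
# transformers API should emit the FutureWarning declared above.
import warnings
from transformers import ImageGPTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ImageGPTFeatureExtractor()
print([w.category.__name__ for w in caught])  # includes 'FutureWarning'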
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): 
UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - 
self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
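A minimal NumPy sketch of the average-pooling step the network above applies between convolution and the fully connected layers: non-overlapping size x size windows are reduced to their mean.

import numpy as np

def average_pool(feature_map, size=2):
    h, w = feature_map.shape
    assert h % size == 0 and w % size == 0
    # Group pixels into (h/size, size, w/size, size) blocks, then average each block.
    return feature_map.reshape(h // size, size, w // size, size).mean(axis=(1, 3))

fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm))  # [[ 2.5  4.5] [10.5 12.5]]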
import unittest from knapsack import greedy_knapsack as kp class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = [10, 20, 30, 40, 50, 60] UpperCamelCase : Optional[int] = [2, 4, 6, 8, 10, 12] UpperCamelCase : str = 100 self.assertEqual(kp.calc_profit(A_ , A_ , A_ ) , 210 ) def __UpperCamelCase( self ): '''simple docstring''' self.assertRaisesRegex(A_ , "max_weight must greater than zero." ) def __UpperCamelCase( self ): '''simple docstring''' self.assertRaisesRegex(A_ , "Weight can not be negative." ) def __UpperCamelCase( self ): '''simple docstring''' self.assertRaisesRegex(A_ , "Profit can not be negative." ) def __UpperCamelCase( self ): '''simple docstring''' self.assertRaisesRegex(A_ , "max_weight must greater than zero." ) def __UpperCamelCase( self ): '''simple docstring''' self.assertRaisesRegex( A_ , "The length of profit and weight must be same." ) if __name__ == "__main__": unittest.main()
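A minimal sketch of the fractional (greedy) knapsack these tests target; this is a hypothetical reimplementation, and the real greedy_knapsack.calc_profit may differ in detail. The error messages are kept verbatim so the regex assertions above would still match.

def calc_profit(profits, weights, max_weight):
    # Weights are assumed strictly positive for the ratio sort below.
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profits):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weights):
        raise ValueError("Weight can not be negative.")
    if len(profits) != len(weights):
        raise IndexError("The length of profit and weight must be same.")
    items = sorted(zip(profits, weights), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for profit, weight in items:
        take = min(weight, capacity)      # take the whole item, or the fraction that fits
        total += profit * take / weight
        capacity -= take
        if capacity == 0:
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210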
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'bart' _UpperCAmelCase :str = ['past_key_values'] _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ): '''simple docstring''' UpperCamelCase : int = vocab_size UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = encoder_attention_heads UpperCamelCase : Optional[int] = decoder_ffn_dim UpperCamelCase : List[str] = decoder_layers UpperCamelCase : Optional[int] = decoder_attention_heads UpperCamelCase : int = dropout UpperCamelCase : int = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : int = init_std UpperCamelCase : List[Any] = encoder_layerdrop UpperCamelCase : List[str] = decoder_layerdrop UpperCamelCase : Dict = classifier_dropout UpperCamelCase : Optional[int] = use_cache UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): UpperCamelCase : int = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase : List[str] = {0: "batch"} UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers for i in range(A_ ): UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Tuple = super().outputs else: UpperCamelCase : Dict = super(A_ , self ).outputs if self.use_past: UpperCamelCase , UpperCamelCase : int = self.num_layers for i in range(A_ ): UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs UpperCamelCase : List[Any] = seq_length if not self.use_past else 1 UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase : List[Any] = dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads UpperCamelCase : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : List[Any] = decoder_seq_length + 3 UpperCamelCase : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase : int = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers UpperCamelCase : Any = min(A_ , A_ ) UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. 
UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads UpperCamelCase : str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase : int = torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) UpperCamelCase : Optional[Any] = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ ) UpperCamelCase : int = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) else: UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
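A minimal sketch of the dummy past_key_values layout the ONNX config above generates: one (key, value) pair per decoder layer, each shaped (batch, num_heads, past_sequence_length, head_dim). The sizes used here are illustrative only.

import torch

batch, num_heads, past_len, head_dim, num_layers = 2, 16, 8, 64, 12
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_len, head_dim),  # key cache
        torch.zeros(batch, num_heads, past_len, head_dim),  # value cache
    )
    for _ in range(num_layers)
]
assert len(past_key_values) == num_layers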
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : str = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class A__ ( __snake_case ): _UpperCAmelCase :Union[str, Any] = 'roberta' def __init__( self , A_=5_0265 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : Optional[int] = vocab_size UpperCamelCase : Dict = hidden_size UpperCamelCase : str = num_hidden_layers UpperCamelCase : Any = num_attention_heads UpperCamelCase : List[str] = hidden_act UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Tuple = hidden_dropout_prob UpperCamelCase : Tuple = attention_probs_dropout_prob UpperCamelCase : Tuple = max_position_embeddings UpperCamelCase : Any = type_vocab_size UpperCamelCase : int = initializer_range UpperCamelCase : str = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : Any = use_cache UpperCamelCase : Union[str, Any] = classifier_dropout class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
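A minimal sketch of the dynamic-axes mapping the ONNX config above returns: axis 0 is always the batch dimension and axis 1 the variable sequence length, with an extra choice axis for multiple-choice tasks.

from collections import OrderedDict

def onnx_inputs(task="default"):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(onnx_inputs("multiple-choice"))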
from math import sqrt def A_ ( _lowerCAmelCase ) -> bool: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' must been an int and positive" UpperCamelCase : List[Any] = True # 0 and 1 are none primes. if number <= 1: UpperCamelCase : List[Any] = False for divisor in range(2 , int(round(sqrt(_lowerCAmelCase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: UpperCamelCase : Union[str, Any] = False break # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'status' must been from type bool" return status def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N UpperCamelCase : int = list(range(2 , n + 1 ) ) UpperCamelCase : Optional[int] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_lowerCAmelCase ) ): for j in range(i + 1 , len(_lowerCAmelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): UpperCamelCase : Tuple = 0 # filters actual prime numbers. UpperCamelCase : str = [x for x in begin_list if x != 0] # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Optional[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2" UpperCamelCase : str = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_lowerCAmelCase ): ans.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0" UpperCamelCase : Optional[Any] = [] # this list will be returns of the function. # potential prime number factors. 
UpperCamelCase : Tuple = 2 UpperCamelCase : str = number if number == 0 or number == 1: ans.append(_lowerCAmelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_lowerCAmelCase ): while quotient != 1: if is_prime(_lowerCAmelCase ) and (quotient % factor == 0): ans.append(_lowerCAmelCase ) quotient /= factor else: factor += 1 else: ans.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Any = prime_factorization(_lowerCAmelCase ) UpperCamelCase : List[Any] = max(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int" return ans def A_ ( _lowerCAmelCase ) -> Union[str, Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Dict = prime_factorization(_lowerCAmelCase ) UpperCamelCase : List[Any] = min(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int" return ans def A_ ( _lowerCAmelCase ) -> Optional[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _lowerCAmelCase ), "compare bust been from type bool" return number % 2 == 0 def A_ ( _lowerCAmelCase ) -> List[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _lowerCAmelCase ), "compare bust been from type bool" return number % 2 != 0 def A_ ( _lowerCAmelCase ) -> Any: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (number > 2) and is_even(_lowerCAmelCase ) ), "'number' must been an int, even and > 2" UpperCamelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' UpperCamelCase : Dict = get_prime_numbers(_lowerCAmelCase ) UpperCamelCase : Tuple = len(_lowerCAmelCase ) # run variable for while-loops. UpperCamelCase : Optional[int] = 0 UpperCamelCase : int = None # exit variable. for break up the loops UpperCamelCase : Union[str, Any] = True while i < len_pn and loop: UpperCamelCase : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: UpperCamelCase : Any = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (len(_lowerCAmelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
UpperCamelCase : Tuple = 0 while numbera != 0: UpperCamelCase : Tuple = numbera % numbera UpperCamelCase : Any = numbera UpperCamelCase : Union[str, Any] = rest # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." UpperCamelCase : Optional[int] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' UpperCamelCase : List[Any] = prime_factorization(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = prime_factorization(_lowerCAmelCase ) elif numbera == 1 or numbera == 1: UpperCamelCase : Optional[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = max(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 UpperCamelCase : List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase ) UpperCamelCase : Tuple = prime_fac_a.count(_lowerCAmelCase ) for _ in range(max(_lowerCAmelCase , _lowerCAmelCase ) ): ans *= n else: UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): ans *= n done.append(_lowerCAmelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: UpperCamelCase : Any = prime_fac_a.count(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): ans *= n done.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def A_ ( _lowerCAmelCase ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'number' must been a positive int" UpperCamelCase : int = 0 UpperCamelCase : int = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_lowerCAmelCase ): ans += 1 # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and is_prime( _lowerCAmelCase ), "'ans' must been a prime number and from type int" return ans def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert ( is_prime(_lowerCAmelCase ) and is_prime(_lowerCAmelCase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" UpperCamelCase : str = p_number_a + 1 # jump to the next number UpperCamelCase : Dict = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_lowerCAmelCase ): number += 1 while number < p_number_a: ans.append(_lowerCAmelCase ) number += 1 # fetch the next prime number. while not is_prime(_lowerCAmelCase ): number += 1 # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ans[0] != p_number_a and ans[len(_lowerCAmelCase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
return ans def A_ ( _lowerCAmelCase ) -> List[str]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1" UpperCamelCase : Dict = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_lowerCAmelCase ) # precondition assert ans[0] == 1 and ans[len(_lowerCAmelCase ) - 1] == n, "Error in function getDivisiors(...)" return ans def A_ ( _lowerCAmelCase ) -> int: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number > 1 ), "'number' must been an int and >= 1" UpperCamelCase : int = get_divisors(_lowerCAmelCase ) # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (divisors[0] == 1) and (divisors[len(_lowerCAmelCase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. UpperCamelCase : List[str] = gcd(abs(_lowerCAmelCase ) , abs(_lowerCAmelCase ) ) # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def A_ ( _lowerCAmelCase ) -> Dict: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0" UpperCamelCase : str = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def A_ ( _lowerCAmelCase ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0" UpperCamelCase : Dict = 0 UpperCamelCase : Dict = 1 UpperCamelCase : Union[str, Any] = 1 # this will be return for _ in range(n - 1 ): UpperCamelCase : Any = ans ans += fiba UpperCamelCase : str = tmp return ans
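Quick usage checks for the helpers above, written against the pre-obfuscation names that the internal calls still reference (is_prime, prime_factorization, gcd, kg_v, goldbach, fib); each value follows directly from the definitions.

assert is_prime(97)
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
assert gcd(54, 24) == 6
assert kg_v(4, 6) == 12           # kg_v is the least common multiple
assert goldbach(28) == [5, 23]    # first prime pair found by the scan
assert fib(10) == 89              # ans starts at 1, so fib(n) is the (n+1)-th Fibonacci number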
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : int = 384 UpperCamelCase : Optional[Any] = 7 if "tiny" in model_name: UpperCamelCase : Optional[Any] = 96 UpperCamelCase : Union[str, Any] = (2, 2, 6, 2) UpperCamelCase : str = (3, 6, 12, 24) elif "small" in model_name: UpperCamelCase : Any = 96 UpperCamelCase : Dict = (2, 2, 18, 2) UpperCamelCase : Tuple = (3, 6, 12, 24) elif "base" in model_name: UpperCamelCase : Optional[int] = 128 UpperCamelCase : Optional[Any] = (2, 2, 18, 2) UpperCamelCase : Optional[Any] = (4, 8, 16, 32) UpperCamelCase : Union[str, Any] = 12 UpperCamelCase : Dict = 512 elif "large" in model_name: UpperCamelCase : Dict = 192 UpperCamelCase : List[Any] = (2, 2, 18, 2) UpperCamelCase : Tuple = (6, 12, 24, 48) UpperCamelCase : List[str] = 12 UpperCamelCase : Optional[int] = 768 # set label information UpperCamelCase : Optional[int] = 150 UpperCamelCase : str = "huggingface/label-files" UpperCamelCase : Union[str, Any] = "ade20k-id2label.json" UpperCamelCase : List[str] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) UpperCamelCase : int = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} UpperCamelCase : int = {v: k for k, v in idalabel.items()} UpperCamelCase : Tuple = SwinConfig( embed_dim=_lowerCAmelCase , depths=_lowerCAmelCase , num_heads=_lowerCAmelCase , window_size=_lowerCAmelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , ) UpperCamelCase : List[Any] = UperNetConfig( backbone_config=_lowerCAmelCase , auxiliary_in_channels=_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase , ) return config def A_ ( _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : List[str] = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) 
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[int] = dct.pop(_lowerCAmelCase ) UpperCamelCase : Any = val def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict: UpperCamelCase : Tuple = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCamelCase : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCamelCase : Union[str, Any] = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) UpperCamelCase : List[str] = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase : Any = in_proj_weight[:dim, :] UpperCamelCase : Dict = in_proj_bias[: dim] UpperCamelCase : str = in_proj_weight[ dim : dim * 2, : ] UpperCamelCase : Optional[Any] = in_proj_bias[ dim : dim * 2 ] UpperCamelCase : Dict = in_proj_weight[ -dim :, : ] UpperCamelCase : Dict = in_proj_bias[-dim :] # fmt: on def A_ ( _lowerCAmelCase ) -> Optional[int]: UpperCamelCase , UpperCamelCase : int = x.shape UpperCamelCase : str = x.reshape(_lowerCAmelCase , 4 , in_channel // 4 ) UpperCamelCase : Optional[int] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase ) return x def A_ ( _lowerCAmelCase ) -> Tuple: UpperCamelCase , UpperCamelCase : List[Any] = x.shape UpperCamelCase : Optional[int] = x.reshape(_lowerCAmelCase , in_channel // 4 , 4 ) UpperCamelCase : List[Any] = x[:, :, [0, 2, 1, 
3]].transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase ) return x def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : List[str] = x.shape[0] UpperCamelCase : Any = x.reshape(4 , in_channel // 4 ) UpperCamelCase : int = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_lowerCAmelCase ) return x def A_ ( _lowerCAmelCase ) -> Optional[int]: UpperCamelCase : Tuple = x.shape[0] UpperCamelCase : List[Any] = x.reshape(in_channel // 4 , 4 ) UpperCamelCase : Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_lowerCAmelCase ) return x def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: UpperCamelCase : Dict = { "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth", "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth", "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth", "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth", } UpperCamelCase : Dict = model_name_to_url[model_name] UpperCamelCase : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , file_name=_lowerCAmelCase )[ "state_dict" ] for name, param in state_dict.items(): print(_lowerCAmelCase , param.shape ) UpperCamelCase : Dict = get_upernet_config(_lowerCAmelCase ) UpperCamelCase : Optional[int] = UperNetForSemanticSegmentation(_lowerCAmelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): UpperCamelCase : List[Any] = state_dict.pop(_lowerCAmelCase ) if "bn" in key: UpperCamelCase : int = key.replace("bn" , "batch_norm" ) UpperCamelCase : List[Any] = val # rename keys UpperCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: UpperCamelCase : str = reverse_correct_unfold_reduction_order(_lowerCAmelCase ) if "norm" in key: UpperCamelCase : List[Any] = reverse_correct_unfold_norm_order(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) # verify on image UpperCamelCase : List[str] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" UpperCamelCase : str = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" ) UpperCamelCase : Dict = SegformerImageProcessor() UpperCamelCase : str = processor(_lowerCAmelCase , return_tensors="pt" ).pixel_values with torch.no_grad(): UpperCamelCase : Any = model(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = outputs.logits print(logits.shape ) print("First values of logits:" , logits[0, 0, :3, :3] ) # assert values if model_name == 
"upernet-swin-tiny": UpperCamelCase : List[str] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": UpperCamelCase : int = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": UpperCamelCase : List[str] = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": UpperCamelCase : Optional[Any] = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCAmelCase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: print(F"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(F"""openmmlab/{model_name}""" ) processor.push_to_hub(F"""openmmlab/{model_name}""" ) if __name__ == "__main__": __lowerCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowerCamelCase : Optional[Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __lowerCamelCase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") __lowerCamelCase : List[str] = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def A_ ( _lowerCAmelCase ) -> List[str]: UpperCamelCase : Optional[Any] = None # source code of `config_class` UpperCamelCase : Tuple = inspect.getsource(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = _re_checkpoint.findall(_lowerCAmelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): UpperCamelCase : Dict = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link UpperCamelCase : Any = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: UpperCamelCase : List[Any] = ckpt_name break return checkpoint def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue UpperCamelCase : Union[str, Any] = get_checkpoint_from_config_class(_lowerCAmelCase ) UpperCamelCase : Optional[int] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Any = "\n".join(sorted(_lowerCAmelCase ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
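The checkpoint-extraction regex above, exercised on a docstring-style markdown link: the captured checkpoint name must round-trip to the captured URL.

import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
text = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
name, link = pattern.findall(text)[0]
assert link == f"https://huggingface.co/{name}"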
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") UpperCamelCase : Any = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(_lowerCAmelCase ): os.makedirs(_lowerCAmelCase ) UpperCamelCase : str = model.state_dict() def to_tf_var_name(_lowerCAmelCase ): for patt, repl in iter(_lowerCAmelCase ): UpperCamelCase : str = name.replace(_lowerCAmelCase , _lowerCAmelCase ) return F"""bert/{name}""" def create_tf_var(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : str = tf.dtypes.as_dtype(tensor.dtype ) UpperCamelCase : str = tf.get_variable(dtype=_lowerCAmelCase , shape=tensor.shape , name=_lowerCAmelCase , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(_lowerCAmelCase ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCamelCase : str = to_tf_var_name(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCamelCase : Any = torch_tensor.T UpperCamelCase : Any = create_tf_var(tensor=_lowerCAmelCase , name=_lowerCAmelCase , session=_lowerCAmelCase ) tf.keras.backend.set_value(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : List[str] = session.run(_lowerCAmelCase ) print(F"""Successfully created {tf_name}: {np.allclose(_lowerCAmelCase , _lowerCAmelCase )}""" ) UpperCamelCase : Optional[Any] = tf.train.Saver(tf.trainable_variables() ) saver.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , model_name.replace("-" , "_" ) + ".ckpt" ) ) def A_ ( _lowerCAmelCase=None ) -> Dict: UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument("--model_name" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Directory in which to save tensorflow model" ) UpperCamelCase : List[Any] = parser.parse_args(_lowerCAmelCase ) UpperCamelCase : Dict = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=_lowerCAmelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
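An excerpt of the (pattern, replacement) table above, applied in order to map a PyTorch state-dict key to a TensorFlow variable name; only three of the rules are shown here.

var_map = (
    ("layer.", "layer_"),
    (".", "/"),
    ("weight", "kernel"),
)

def to_tf_var_name(name):
    for patt, repl in var_map:
        name = name.replace(patt, repl)
    return f"bert/{name}"

print(to_tf_var_name("encoder.layer.0.output.dense.weight"))
# -> bert/encoder/layer_0/output/dense/kernel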
from __future__ import annotations from functools import lru_cache from math import ceil __lowerCamelCase : str = 100 __lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) __lowerCamelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowerCAmelCase ) -> set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase : set[int] = set() UpperCamelCase : int UpperCamelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowerCAmelCase = 5000 ) -> int | None: for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f"""{solution() = }""")
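A quick check of the cached prime-partition helper above, using the pre-obfuscation name partition: 10 has exactly five partitions into prime summands (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), and since a multiset of primes is uniquely recovered from its product, the returned set has five elements.

assert partition(0) == {1}
assert len(partition(10)) == 5
assert 2 * 3 * 5 in partition(10)   # the summand multiset {2, 3, 5}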
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Optional[int] = torch.device("""cpu""") def A_ ( ) -> Optional[int]: UpperCamelCase : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCamelCase : Tuple = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im def A_ ( _lowerCAmelCase ) -> str: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Optional[int] = dct.pop(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = val def A_ ( _lowerCAmelCase ) -> List[Any]: UpperCamelCase : List[str] = [] for k in state_dict.keys(): UpperCamelCase : Any = k if ".pwconv" in k: UpperCamelCase : Tuple = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: UpperCamelCase : Tuple = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: UpperCamelCase : Any = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: UpperCamelCase : Any = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: UpperCamelCase : Optional[int] = k_new.split("." ) if ls[2].isdigit(): UpperCamelCase : List[str] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: UpperCamelCase : Tuple = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: UpperCamelCase : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCamelCase : Any = 1000 UpperCamelCase : int = "huggingface/label-files" UpperCamelCase : Optional[int] = "imagenet-1k-id2label.json" UpperCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) UpperCamelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} UpperCamelCase : List[Any] = idalabel UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCamelCase : Dict = [3, 3, 6, 4] UpperCamelCase : int = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": UpperCamelCase : Optional[int] = [3, 3, 9, 6] UpperCamelCase : str = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": UpperCamelCase : int = [4, 3, 10, 5] UpperCamelCase : str = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": UpperCamelCase : Optional[int] = [4, 4, 12, 6] UpperCamelCase : str = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): UpperCamelCase : Optional[Any] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase ) else: UpperCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" ) UpperCamelCase : Dict = checkpoint UpperCamelCase : List[str] = create_rename_keys(_lowerCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model UpperCamelCase : Any = SwiftFormerForImageClassification(_lowerCAmelCase ).eval() hf_model.load_state_dict(_lowerCAmelCase ) # prepare test inputs UpperCamelCase : Dict = prepare_img() UpperCamelCase : int = ViTImageProcessor.from_pretrained("preprocessor_config" ) UpperCamelCase : List[Any] = processor(images=_lowerCAmelCase , return_tensors="pt" ) # compare outputs from both models UpperCamelCase : List[str] = get_expected_output(_lowerCAmelCase ) UpperCamelCase : Any = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _lowerCAmelCase , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") __lowerCamelCase : List[str] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, 
args.original_ckpt)
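# Distilled from the conversion script above: checkpoint porting boils down to
# building (old, new) key pairs and popping/re-inserting entries in the state
# dict. A minimal, self-contained sketch (the rule below is illustrative, not
# the full SwiftFormer mapping):

import torch

def rename_state_dict(state_dict: dict, rules: list) -> dict:
    """Return a copy of state_dict with every key rewritten through the rules."""
    renamed = {}
    for key, value in state_dict.items():
        new_key = key
        for old, new in rules:
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed

dummy = {"network.0.pwconv.weight": torch.zeros(1)}
print(rename_state_dict(dummy, [(".pwconv", ".point_wise_conv")]))
# {'network.0.point_wise_conv.weight': tensor([0.])}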
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def decimal_to_binary(number: str) -> str:
    # descriptive name chosen for the obfuscated entry point
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
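# For comparison, an iterative sketch of the same conversion; it mirrors the
# divmod logic above without hitting Python's recursion-depth limit:

def binary_iterative(decimal: int) -> str:
    if decimal == 0:
        return "0b0"
    sign, decimal = ("-", -decimal) if decimal < 0 else ("", decimal)
    bits = ""
    while decimal:
        decimal, remainder = divmod(decimal, 2)
        bits = str(remainder) + bits
    return f"{sign}0b{bits}"

assert binary_iterative(40) == "0b101000" == f"0b{40:b}"
assert binary_iterative(-5) == "-0b101"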
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter: UpperCamelCase : Optional[int] = tau * frequency / samplerate UpperCamelCase : Tuple = sin(_lowerCAmelCase ) UpperCamelCase : List[Any] = cos(_lowerCAmelCase ) UpperCamelCase : str = _sin / (2 * q_factor) UpperCamelCase : List[str] = (1 - _cos) / 2 UpperCamelCase : Any = 1 - _cos UpperCamelCase : int = 1 + alpha UpperCamelCase : List[Any] = -2 * _cos UpperCamelCase : Optional[Any] = 1 - alpha UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter: UpperCamelCase : Union[str, Any] = tau * frequency / samplerate UpperCamelCase : Dict = sin(_lowerCAmelCase ) UpperCamelCase : int = cos(_lowerCAmelCase ) UpperCamelCase : Dict = _sin / (2 * q_factor) UpperCamelCase : str = (1 + _cos) / 2 UpperCamelCase : List[str] = -1 - _cos UpperCamelCase : Any = 1 + alpha UpperCamelCase : List[str] = -2 * _cos UpperCamelCase : Optional[Any] = 1 - alpha UpperCamelCase : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter: UpperCamelCase : Union[str, Any] = tau * frequency / samplerate UpperCamelCase : Tuple = sin(_lowerCAmelCase ) UpperCamelCase : List[Any] = cos(_lowerCAmelCase ) UpperCamelCase : str = _sin / (2 * q_factor) UpperCamelCase : List[str] = _sin / 2 UpperCamelCase : Dict = 0 UpperCamelCase : str = -ba UpperCamelCase : Tuple = 1 + alpha UpperCamelCase : int = -2 * _cos UpperCamelCase : Any = 1 - alpha UpperCamelCase : List[str] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter: UpperCamelCase : List[Any] = tau * frequency / samplerate UpperCamelCase : List[str] = sin(_lowerCAmelCase ) UpperCamelCase : Optional[int] = cos(_lowerCAmelCase ) UpperCamelCase : int = _sin / (2 * q_factor) UpperCamelCase : str = 1 - alpha UpperCamelCase : Union[str, Any] = -2 * _cos UpperCamelCase : Optional[Any] = 1 + alpha UpperCamelCase : Any = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter: UpperCamelCase : Optional[Any] = tau * frequency / samplerate UpperCamelCase : Optional[int] = sin(_lowerCAmelCase ) UpperCamelCase : Any = cos(_lowerCAmelCase ) UpperCamelCase : Tuple = _sin / (2 * q_factor) UpperCamelCase : Any = 10 ** (gain_db / 40) UpperCamelCase : Optional[Any] = 1 + alpha * big_a UpperCamelCase : int = -2 * _cos UpperCamelCase : List[Any] = 1 - alpha * big_a UpperCamelCase : Any = 1 + alpha / big_a UpperCamelCase : List[Any] = -2 * _cos UpperCamelCase : List[str] = 1 - alpha / big_a UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter: UpperCamelCase : List[Any] = tau * frequency / samplerate UpperCamelCase : int = sin(_lowerCAmelCase ) UpperCamelCase : Tuple = cos(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = _sin / (2 * q_factor) UpperCamelCase : Optional[Any] = 10 ** (gain_db / 40) UpperCamelCase : Tuple = (big_a + 1) - (big_a - 1) * 
_cos UpperCamelCase : Any = (big_a + 1) + (big_a - 1) * _cos UpperCamelCase : str = (big_a - 1) - (big_a + 1) * _cos UpperCamelCase : List[Any] = (big_a - 1) + (big_a + 1) * _cos UpperCamelCase : Tuple = 2 * sqrt(_lowerCAmelCase ) * alpha UpperCamelCase : str = big_a * (pmc + aaa) UpperCamelCase : int = 2 * big_a * mpc UpperCamelCase : Optional[Any] = big_a * (pmc - aaa) UpperCamelCase : Any = ppmc + aaa UpperCamelCase : Optional[Any] = -2 * pmpc UpperCamelCase : int = ppmc - aaa UpperCamelCase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter: UpperCamelCase : Optional[int] = tau * frequency / samplerate UpperCamelCase : Union[str, Any] = sin(_lowerCAmelCase ) UpperCamelCase : Optional[int] = cos(_lowerCAmelCase ) UpperCamelCase : Optional[int] = _sin / (2 * q_factor) UpperCamelCase : int = 10 ** (gain_db / 40) UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos UpperCamelCase : str = (big_a + 1) + (big_a - 1) * _cos UpperCamelCase : Tuple = (big_a - 1) - (big_a + 1) * _cos UpperCamelCase : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos UpperCamelCase : Dict = 2 * sqrt(_lowerCAmelCase ) * alpha UpperCamelCase : str = big_a * (ppmc + aaa) UpperCamelCase : Any = -2 * big_a * pmpc UpperCamelCase : int = big_a * (ppmc - aaa) UpperCamelCase : str = pmc + aaa UpperCamelCase : Dict = 2 * mpc UpperCamelCase : List[Any] = pmc - aaa UpperCamelCase : List[str] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
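# A self-contained sketch of how the biquad coefficients above are used: the
# same low-pass coefficients, normalised so a0 == 1, applied with a plain
# direct-form-I difference equation instead of the IIRFilter class:

from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency: float, samplerate: int, q_factor: float = 1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b0 = b2 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    return [b / a0 for b in (b0, b1, b2)], [1.0, a1 / a0, a2 / a0]

def process(samples, b, a):
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x in samples:
        y = b[0] * x + b[1] * x1 + b[2] * x2 - a[1] * y1 - a[2] * y2
        x2, x1, y2, y1 = x1, x, y1, y
        out.append(y)
    return out

b, a = lowpass_coefficients(1_000, 48_000)
print(process([1.0, 0.0, 0.0], b, a))  # first three samples of the impulse response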
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Union[str, Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : str = type_vocab_size UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : Dict = initializer_range UpperCamelCase : int = num_labels UpperCamelCase : Optional[int] = scope UpperCamelCase : int = range_bbox def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase : Union[str, Any] = bbox[i, j, 3] UpperCamelCase : int = bbox[i, j, 1] UpperCamelCase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase : List[str] = bbox[i, j, 2] UpperCamelCase : Optional[int] = bbox[i, j, 0] UpperCamelCase : Optional[Any] = t UpperCamelCase : Dict = None if self.use_input_mask: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase : str = None if self.use_token_type_ids: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ ) UpperCamelCase : Any = model(A_ , bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = self.num_labels UpperCamelCase : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs UpperCamelCase : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' return True def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = LiltModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : Union[str, Any] = type 
self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ ) UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ ) UpperCamelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ ) UpperCamelCase : List[str] = torch.Size([1, 2, 768] ) UpperCamelCase : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , ) self.assertTrue(outputs.last_hidden_state.shape , A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
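# The nested-loop bbox legalisation in prepare_config_and_inputs above can be
# expressed as a vectorised sketch: sort each (x0, x1) and (y0, y1) pair so the
# top-left coordinate never exceeds the bottom-right one:

import torch

def legalize_bbox(bbox: torch.Tensor) -> torch.Tensor:
    """bbox: (batch, seq_len, 4) as (x0, y0, x1, y1); returns a legal copy."""
    x = torch.sort(bbox[..., [0, 2]], dim=-1).values
    y = torch.sort(bbox[..., [1, 3]], dim=-1).values
    return torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)

bad = torch.tensor([[[5, 7, 2, 3]]])
print(legalize_bbox(bad))  # tensor([[[2, 3, 5, 7]]])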
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : List[str] = logging.get_logger(__name__) def A_ ( _lowerCAmelCase , _lowerCAmelCase=False ) -> Any: UpperCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: for i in range(config.num_hidden_layers ): if base_model: UpperCamelCase : Tuple = "" else: UpperCamelCase : Dict = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) UpperCamelCase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase : Dict = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase : str = in_proj_bias[: config.hidden_size] UpperCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase : Dict = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase : Optional[int] = in_proj_bias[-config.hidden_size :] def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: UpperCamelCase : Optional[Any] = dct.pop(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = val def A_ ( ) -> Optional[Any]: UpperCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCamelCase : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : str = DeiTConfig() # all deit models have fine-tuned heads UpperCamelCase : Tuple = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCamelCase : List[str] = 1000 UpperCamelCase : int = "huggingface/label-files" UpperCamelCase : Optional[int] = "imagenet-1k-id2label.json" UpperCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) UpperCamelCase : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} UpperCamelCase : Tuple = idalabel UpperCamelCase : List[Any] = {v: k for k, v in idalabel.items()} UpperCamelCase : int = int(deit_name[-6:-4] ) UpperCamelCase : str = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): UpperCamelCase : Dict = 192 UpperCamelCase : Optional[int] = 768 UpperCamelCase : List[str] = 12 UpperCamelCase : Any = 3 elif deit_name[9:].startswith("small" ): UpperCamelCase : Tuple = 384 UpperCamelCase : Union[str, Any] = 1536 UpperCamelCase : Optional[Any] = 12 UpperCamelCase : Union[str, Any] = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): UpperCamelCase : Dict = 1024 UpperCamelCase : Optional[int] = 4096 UpperCamelCase : str = 24 UpperCamelCase : str = 16 # load original model from timm UpperCamelCase : str = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCamelCase : str = timm_model.state_dict() UpperCamelCase : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model UpperCamelCase : List[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by DeiTImageProcessor UpperCamelCase : Dict = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCamelCase : Union[str, Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size ) UpperCamelCase : List[str] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCamelCase : List[Any] = encoding["pixel_values"] UpperCamelCase : Tuple = model(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __lowerCamelCase : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
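# A minimal sketch of the fused-QKV split that read_in_q_k_v performs above:
# timm stores query/key/value as one (3 * hidden, hidden) matrix, while the HF
# model expects three separate (hidden, hidden) projections.

import torch

def split_qkv(qkv_weight: torch.Tensor, qkv_bias: torch.Tensor, hidden_size: int):
    q_w, k_w, v_w = qkv_weight.split(hidden_size, dim=0)
    q_b, k_b, v_b = qkv_bias.split(hidden_size, dim=0)
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)

hidden = 4
(q_w, q_b), _, _ = split_qkv(torch.randn(3 * hidden, hidden), torch.randn(3 * hidden), hidden)
assert q_w.shape == (hidden, hidden) and q_b.shape == (hidden,)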
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __lowerCamelCase : Union[str, Any] = pytest.mark.integration @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} ) return dset def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() UpperCamelCase : List[Any] = dset.map( lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ ) UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch UpperCamelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} UpperCamelCase : Optional[Any] = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=A_ ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[Any] = 1 UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ ) self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1] UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ ) self.assertRaises(A_ , index.search_batch , queries[0] ) UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores] UpperCamelCase : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(A_ ): UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dict = faiss.IndexFlat(5 ) UpperCamelCase : Union[str, 
Any] = FaissIndex(custom_index=A_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: index.save(tmp_file.name ) UpperCamelCase : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase : str = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : int = 1 UpperCamelCase , UpperCamelCase : Dict = index.search(A_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def A_ ( _lowerCAmelCase ) -> Optional[int]: import faiss UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) UpperCamelCase : List[Any] = "index.faiss" UpperCamelCase : List[str] = F"""mock://{index_name}""" index.save(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[int] = 1 UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = Elasticsearch() UpperCamelCase : Union[str, Any] = {"acknowledged": True} UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query UpperCamelCase : str = "foo" UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout UpperCamelCase : Dict = "foo" UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries UpperCamelCase : Dict = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ ) UpperCamelCase : str = [scores[0] for scores in total_scores] UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ ) # batched queries with timeout UpperCamelCase : int = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , 
request_timeout=30 ) UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores] UpperCamelCase : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ )
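# Stripped of the test harness, the FAISS workflow exercised above is three
# calls: build an inner-product index, add float32 vectors, search. Requires
# `pip install faiss-cpu`.

import numpy as np
import faiss

dim = 5
index = faiss.IndexFlatIP(dim)              # exact inner-product index
index.add(np.eye(dim, dtype=np.float32))    # five one-hot "documents"

query = np.zeros((1, dim), dtype=np.float32)
query[0, 1] = 1.0
scores, indices = index.search(query, 1)    # top-1 neighbour per query row
print(scores[0, 0], indices[0, 0])          # 1.0 1 -> nearest is document 1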
import warnings

warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main "
    "`__init__`: `from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][tile_length - 2]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
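# The same count follows from a one-dimensional recurrence: if T_k(n) counts
# rows of length n tiled with unit black squares and coloured tiles of length
# k (including the all-black row), then T_k(n) = T_k(n - 1) + T_k(n - k), and
# the answer is the sum over k in {2, 3, 4} of T_k(n) - 1. A minimal sketch
# that should agree with solution() above:

def tilings(length: int, tile: int) -> int:
    ways = [1] * (length + 1)      # ways[n] == 1 for n < tile (all-black row)
    for n in range(tile, length + 1):
        ways[n] = ways[n - 1] + ways[n - tile]
    return ways[length] - 1        # exclude the row with no coloured tile

def solution_via_recurrence(length: int = 50) -> int:
    return sum(tilings(length, k) for k in (2, 3, 4))

assert solution_via_recurrence(5) == 12  # 7 red + 3 green + 2 blue tilings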
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = StableDiffusionPanoramaPipeline _UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase :Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase :Any = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) UpperCamelCase : Optional[int] = DDIMScheduler() torch.manual_seed(0 ) UpperCamelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCamelCase : Any = CLIPTextModel(A_ ) UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCamelCase : Dict = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : int = torch.manual_seed(A_ ) UpperCamelCase : List[str] = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase : Optional[Any] = self.get_dummy_components() UpperCamelCase : Dict = StableDiffusionPanoramaPipeline(**A_ ) UpperCamelCase : Tuple = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Optional[int] = self.get_dummy_inputs(A_ ) UpperCamelCase : str = sd_pipe(**A_ ).images UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : Optional[int] = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase : Optional[Any] = self.get_dummy_components() UpperCamelCase : Union[str, Any] = StableDiffusionPanoramaPipeline(**A_ ) UpperCamelCase : Optional[Any] = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Dict = self.get_dummy_inputs(A_ ) UpperCamelCase : List[str] = "french fries" UpperCamelCase : Optional[Any] = sd_pipe(**A_ , negative_prompt=A_ ) UpperCamelCase : Any = output.images UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : Optional[int] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase : int = self.get_dummy_components() UpperCamelCase : List[str] = StableDiffusionPanoramaPipeline(**A_ ) UpperCamelCase : Optional[Any] = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Optional[int] = self.get_dummy_inputs(A_ ) UpperCamelCase : Dict = sd_pipe(**A_ , view_batch_size=2 ) UpperCamelCase : Dict = output.images UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : Tuple = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase : int = self.get_dummy_components() UpperCamelCase : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" ) UpperCamelCase : Any = StableDiffusionPanoramaPipeline(**A_ ) UpperCamelCase : Dict = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Optional[Any] = self.get_dummy_inputs(A_ ) UpperCamelCase : Any = sd_pipe(**A_ ).images UpperCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : str = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 
0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase : Any = self.get_dummy_components() UpperCamelCase : int = PNDMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , skip_prk_steps=A_ ) UpperCamelCase : List[Any] = StableDiffusionPanoramaPipeline(**A_ ) UpperCamelCase : Optional[Any] = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Optional[int] = self.get_dummy_inputs(A_ ) UpperCamelCase : Union[str, Any] = sd_pipe(**A_ ).images UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase : Optional[Any] = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self , A_=0 ): '''simple docstring''' UpperCamelCase : Optional[Any] = torch.manual_seed(A_ ) UpperCamelCase : Optional[Any] = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = "stabilityai/stable-diffusion-2-base" UpperCamelCase : str = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" ) UpperCamelCase : str = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() UpperCamelCase : List[str] = self.get_inputs() UpperCamelCase : Union[str, Any] = pipe(**A_ ).images UpperCamelCase : str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) UpperCamelCase : Any = np.array( [ 0.36_96_83_92, 0.27_02_53_72, 0.32_44_67_66, 0.28_37_93_87, 0.36_36_32_74, 0.30_73_33_47, 0.27_10_00_27, 0.27_05_41_25, 0.25_53_60_96, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base" , safety_checker=A_ ) UpperCamelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() UpperCamelCase : str = self.get_inputs() UpperCamelCase : Optional[int] = pipe(**A_ ).images UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) UpperCamelCase : List[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = 0 def callback_fn(A_ , A_ , A_ ) -> None: UpperCamelCase : Union[str, Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: UpperCamelCase : str = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) UpperCamelCase : Tuple = latents[0, -3:, -3:, -1] UpperCamelCase : Any = np.array( [ 0.18_68_18_69, 0.33_90_78_16, 0.5_36_12_76, 0.14_43_28_65, -0.02_85_66_11, -0.73_94_11_23, 0.23_39_79_87, 0.47_32_26_82, 
-0.37_82_31_64, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: UpperCamelCase : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) UpperCamelCase : str = latents[0, -3:, -3:, -1] UpperCamelCase : Union[str, Any] = np.array( [ 0.18_53_96_45, 0.33_98_72_48, 0.5_37_85_59, 0.14_43_71_42, -0.02_45_52_61, -0.7_33_83_17, 0.23_99_07_55, 0.47_35_62_72, -0.3_78_65_05, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 UpperCamelCase : List[str] = False UpperCamelCase : Optional[int] = "stabilityai/stable-diffusion-2-base" UpperCamelCase : List[str] = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" ) UpperCamelCase : int = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ ) UpperCamelCase : str = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() UpperCamelCase : int = self.get_inputs() pipe(**A_ , callback=A_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __UpperCamelCase( self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCamelCase : str = "stabilityai/stable-diffusion-2-base" UpperCamelCase : List[str] = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" ) UpperCamelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ ) UpperCamelCase : Union[str, Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCamelCase : Any = self.get_inputs() UpperCamelCase : Optional[Any] = pipe(**A_ ) UpperCamelCase : int = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
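# Outside the test harness, the pipeline exercised above reduces to a few
# lines. A hedged sketch: the model id and prompt are just the ones used in
# the tests, and a CUDA device is assumed.

import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

image = pipe("a photo of the dolomites").images[0]
image.save("panorama.png")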
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
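# A short, non-interactive usage sketch of the cipher above (output values
# computed from the mapping that create_cipher_map builds for this key):

cipher_map = create_cipher_map("Marvin")
secret = encipher("Hello World!!", cipher_map)
print(secret)                        # CIGGK WKPGV!!
print(decipher(secret, cipher_map))  # HELLO WORLD!!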
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCamelCase : str = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from sklearn.metrics import fa_score import datasets __lowerCamelCase : List[Any] = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __lowerCamelCase : List[Any] = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ __lowerCamelCase : str = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ): '''simple docstring''' UpperCamelCase : List[str] = fa_score( A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ ) return {"f1": float(A_ ) if score.size == 1 else score}
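# The docstring's Example 1 reduces to the plain harmonic-mean formula; a
# minimal sketch that reproduces it without sklearn or datasets:

def f1_binary(references, predictions, pos_label=1):
    tp = sum(r == p == pos_label for r, p in zip(references, predictions))
    fp = sum(p == pos_label != r for r, p in zip(references, predictions))
    fn = sum(r == pos_label != p for r, p in zip(references, predictions))
    return 2 * tp / (2 * tp + fp + fn) if tp else 0.0

assert f1_binary([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]) == 0.5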
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename basic JAX checkpoint keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
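# Usage sketch (not part of the original script; the paths below are
# placeholders, and running this requires a real T5X Switch checkpoint on disk):
#
#     metadata, index = shard_on_the_fly(
#         "/path/to/t5x/checkpoint_634600",  # switch_checkpoint_path (placeholder)
#         "/path/to/pytorch_dump",           # dump_path (placeholder)
#         "10GB",                            # max_shard_size, parsed by convert_file_size_to_int
#         "bfloat16",                        # dtype, resolved via getattr(torch, dtype)
#     )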
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], 
"vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
import os


def solution() -> int:
    """Find the greatest product of four adjacent numbers (in any direction) in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst; return False if they were already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # attach the lower-ranked root under the higher-ranked one
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
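# A minimal usage sketch (not from the original file; the set sizes and
# indices are made-up example values).
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1, 1])  # four singleton sets
    ds.merge(1, 2)
    ds.merge(0, 2)
    assert ds.get_parent(0) == ds.get_parent(1)  # 0, 1 and 2 now share a root
    assert ds.max_set == 3  # the largest set holds three elements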
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'bart' _UpperCAmelCase :str = ['past_key_values'] _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ): '''simple docstring''' UpperCamelCase : int = vocab_size UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = encoder_attention_heads UpperCamelCase : Optional[int] = decoder_ffn_dim UpperCamelCase : List[str] = decoder_layers UpperCamelCase : Optional[int] = decoder_attention_heads UpperCamelCase : int = dropout UpperCamelCase : int = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : int = init_std UpperCamelCase : List[Any] = encoder_layerdrop UpperCamelCase : List[str] = decoder_layerdrop UpperCamelCase : Dict = classifier_dropout UpperCamelCase : Optional[int] = use_cache UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): UpperCamelCase : int = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase : List[str] = {0: "batch"} UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers for i in range(A_ ): UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Tuple = super().outputs else: UpperCamelCase : Dict = super(A_ , self ).outputs if self.use_past: UpperCamelCase , UpperCamelCase : int = self.num_layers for i in range(A_ ): UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs UpperCamelCase : List[Any] = seq_length if not self.use_past else 1 UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase : List[Any] = dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads UpperCamelCase : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : List[Any] = decoder_seq_length + 3 UpperCamelCase : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase : int = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers UpperCamelCase : Any = min(A_ , A_ ) UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. 
UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads UpperCamelCase : str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase : int = torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) UpperCamelCase : Optional[Any] = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ ) UpperCamelCase : int = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) else: UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
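# The configuration class above corresponds to transformers.BartConfig
# upstream; a minimal sketch of building a smaller-than-default config
# (the values are arbitrary examples):
#
#     from transformers import BartConfig
#     config = BartConfig(d_model=256, encoder_layers=2, decoder_layers=2)
#     assert config.hidden_size == 256  # mapped to d_model via attribute_map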
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class A__ : @staticmethod def __UpperCamelCase( *A_ , **A_ ): '''simple docstring''' pass def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Union[str, Any] = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : Dict = np.array(_lowerCAmelCase ) UpperCamelCase : Optional[int] = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class A__ ( unittest.TestCase ): _UpperCAmelCase :Tuple = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) _UpperCAmelCase :List[str] = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = MaskGenerationPipeline(model=A_ , image_processor=A_ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' pass @require_tf @unittest.skip("Image segmentation not implemented in TF" ) def __UpperCamelCase( self ): '''simple docstring''' pass @slow @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = pipeline("mask-generation" , model="facebook/sam-vit-huge" ) UpperCamelCase : Optional[Any] = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 ) # Shortening by hashing UpperCamelCase : Optional[Any] = [] for i, o in enumerate(outputs["masks"] ): new_outupt += [{"mask": mask_to_test_readable(A_ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53}, {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67}, {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93}, {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09}, {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79}, {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34}, {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16}, {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12}, {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99}, {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52}, {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32}, {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16}, {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99}, {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, 
"scores": 0.94_83}, {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64}, {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43}, {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43}, {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08}, {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35}, {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26}, {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62}, {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99}, {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86}, {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84}, {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73}, {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71} ] , ) # fmt: on @require_torch @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "facebook/sam-vit-huge" UpperCamelCase : Optional[Any] = pipeline("mask-generation" , model=A_ ) UpperCamelCase : Optional[Any] = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing UpperCamelCase : Dict = [] for i, o in enumerate(outputs["masks"] ): new_outupt += [{"mask": mask_to_test_readable(A_ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53}, ] , )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
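# A minimal usage sketch (assumed; not part of the original file). Inside a
# transformers installation the class is exposed as transformers.ConvNextV2Config:
#
#     from transformers import ConvNextV2Config
#     config = ConvNextV2Config(out_features=["stage4"])
#     assert config.hidden_sizes == [96, 192, 384, 768]  # ConvNeXT V2-tiny defaults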
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __lowerCamelCase : Any = logging.get_logger(__name__) __lowerCamelCase : Tuple = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), ("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", 
"""Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) __lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def A_ ( _lowerCAmelCase ) -> List[Any]: for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: UpperCamelCase : List[Any] = model_type_to_module_name(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = importlib.import_module(F""".{module_name}""" , "transformers.models" ) try: return getattr(_lowerCAmelCase , _lowerCAmelCase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(_lowerCAmelCase , "__name__" , _lowerCAmelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. UpperCamelCase : List[str] = importlib.import_module("transformers" ) if hasattr(_lowerCAmelCase , _lowerCAmelCase ): return getattr(_lowerCAmelCase , _lowerCAmelCase ) return None def A_ ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , **_lowerCAmelCase , ) -> Optional[Any]: UpperCamelCase : Optional[Any] = get_file_from_repo( _lowerCAmelCase , _lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , resume_download=_lowerCAmelCase , proxies=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , local_files_only=_lowerCAmelCase , ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead." ) return {} with open(_lowerCAmelCase , encoding="utf-8" ) as reader: return json.load(_lowerCAmelCase ) class A__ : def __init__( self ): '''simple docstring''' raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(A_ ) def __UpperCamelCase( cls , A_ , **A_ ): '''simple docstring''' UpperCamelCase : Any = kwargs.pop("config" , A_ ) UpperCamelCase : Tuple = kwargs.pop("trust_remote_code" , A_ ) UpperCamelCase : Dict = True UpperCamelCase , UpperCamelCase : Union[str, Any] = FeatureExtractionMixin.get_feature_extractor_dict(A_ , **A_ ) UpperCamelCase : Any = config_dict.get("feature_extractor_type" , A_ ) UpperCamelCase : Any = None if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): UpperCamelCase : List[str] = config_dict["auto_map"]["AutoFeatureExtractor"] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(A_ , A_ ): UpperCamelCase : str = AutoConfig.from_pretrained(A_ , **A_ ) # It could be in `config.feature_extractor_type`` UpperCamelCase : List[Any] = getattr(A_ , "feature_extractor_type" , A_ ) if hasattr(A_ , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map: UpperCamelCase : str = config.auto_map["AutoFeatureExtractor"] if feature_extractor_class is not None: UpperCamelCase : Any = feature_extractor_class_from_name(A_ ) UpperCamelCase : Optional[int] = feature_extractor_auto_map is not None UpperCamelCase : Optional[int] = feature_extractor_class is not None or type(A_ ) in FEATURE_EXTRACTOR_MAPPING UpperCamelCase : int = resolve_trust_remote_code( A_ , A_ , A_ , A_ ) if has_remote_code and trust_remote_code: UpperCamelCase : str = get_class_from_dynamic_module( A_ , A_ , **A_ ) UpperCamelCase : Union[str, Any] = kwargs.pop("code_revision" , A_ ) if os.path.isdir(A_ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(A_ , **A_ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(A_ , **A_ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(A_ ) in FEATURE_EXTRACTOR_MAPPING: UpperCamelCase : List[str] = FEATURE_EXTRACTOR_MAPPING[type(A_ )] return feature_extractor_class.from_dict(A_ , **A_ ) raise ValueError( F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def __UpperCamelCase( A_ , A_ ): '''simple docstring''' FEATURE_EXTRACTOR_MAPPING.register(A_ , A_ )
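# Typical entry point for the class above (a sketch; the checkpoint is a
# common public example, and any repo with a feature extractor config works):
#
#     from transformers import AutoFeatureExtractor
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")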
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral.

    For ring i (i >= 1) the four corners are (2i+1)^2, (2i+1)^2 - 2i,
    (2i+1)^2 - 4i and (2i+1)^2 - 6i, so each ring adds 4 * (2i+1)^2 - 12i.
    """
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, 
Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCamelCase : Tuple = 1 UpperCamelCase : Tuple = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = ssent.split(" " ) UpperCamelCase : Dict = csent.split(" " ) UpperCamelCase : str = [] UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : str = [] UpperCamelCase : str = [] UpperCamelCase : Dict = [] UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [] UpperCamelCase : Tuple = [] for rsent in rsents: UpperCamelCase : List[Any] = rsent.split(" " ) UpperCamelCase : List[str] = [] UpperCamelCase : int = [] UpperCamelCase : Tuple = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCamelCase : str = 
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, 
Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCamelCase : Tuple = 1 UpperCamelCase : Tuple = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = ssent.split(" " ) UpperCamelCase : Dict = csent.split(" " ) UpperCamelCase : str = [] UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : str = [] UpperCamelCase : str = [] UpperCamelCase : Dict = [] UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [] UpperCamelCase : Tuple = [] for rsent in rsents: UpperCamelCase : List[Any] = rsent.split(" " ) UpperCamelCase : List[str] = [] UpperCamelCase : int = [] UpperCamelCase : Tuple = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCamelCase : str = 
sum([delascore, delascore, delascore, delascore] ) / 4 UpperCamelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCamelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: UpperCamelCase : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCamelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: UpperCamelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": UpperCamelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase ) elif tokenizer == "penn": UpperCamelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase ) else: UpperCamelCase : Union[str, Any] = sentence if not return_str: UpperCamelCase : Tuple = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError("Sources length must match predictions and references lengths." 
) UpperCamelCase : Optional[Any] = 0 for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] ) UpperCamelCase : Optional[int] = sari_score / len(_lowerCAmelCase ) return 100 * sari_score def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]: UpperCamelCase : Optional[Any] = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] UpperCamelCase : Tuple = sacrebleu.corpus_bleu( _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = {} result.update({"sari": compute_sari(sources=A_ , predictions=A_ , references=A_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=A_ , references=A_ )} ) result.update({"exact": compute_em(predictions=A_ , references=A_ )} ) return result
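# A usage sketch for the metric defined above. It assumes the script is
# registered with the `datasets` library under the name "wiki_split" (the name
# its own docstring uses) and a `datasets` version that still ships
# `load_metric`; the sentences are the docstring's toy inputs, not real data.
if __name__ == "__main__":
    import datasets

    wiki_split = datasets.load_metric("wiki_split")
    results = wiki_split.compute(
        sources=["About 95 species are currently accepted ."],
        predictions=["About 95 you now get in ."],
        references=[["About 95 species are currently known ."]],
    )
    print(results)  # {'sari': ..., 'sacrebleu': ..., 'exact': ...}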
import logging import os import threading import time try: import warnings except ImportError: __lowerCamelCase : Tuple = None try: import msvcrt except ImportError: __lowerCamelCase : Optional[Any] = None try: import fcntl except ImportError: __lowerCamelCase : int = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __lowerCamelCase : Dict = OSError # Data # ------------------------------------------------ __lowerCamelCase : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] __lowerCamelCase : Optional[int] = """3.0.12""" __lowerCamelCase : List[Any] = None def A_ ( ) -> Union[str, Any]: global _logger UpperCamelCase : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class A__ ( __snake_case ): def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Tuple = lock_file return None def __str__( self ): '''simple docstring''' UpperCamelCase : Dict = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.lock.release() return None class A__ : def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long UpperCamelCase : Optional[int] = self.hash_filename_if_too_long(A_ , A_ ) # The path to the lock file. UpperCamelCase : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase : Optional[Any] = None # The default timeout value. UpperCamelCase : Any = timeout # We use this lock primarily for the lock counter. UpperCamelCase : str = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase : Union[str, Any] = 0 return None @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file @property def __UpperCamelCase( self ): '''simple docstring''' return self._timeout @timeout.setter def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Any = float(A_ ) return None def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() def __UpperCamelCase( self ): '''simple docstring''' raise NotImplementedError() @property def __UpperCamelCase( self ): '''simple docstring''' return self._lock_file_fd is not None def __UpperCamelCase( self , A_=None , A_=0.05 ): '''simple docstring''' if timeout is None: UpperCamelCase : int = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 UpperCamelCase : Optional[int] = id(self ) UpperCamelCase : Tuple = self._lock_file UpperCamelCase : Dict = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(A_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase : int = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCamelCase( self , A_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase : List[Any] = id(self ) UpperCamelCase : Union[str, Any] = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() UpperCamelCase : Optional[Any] = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , A_ , A_ , A_ ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=A_ ) return None def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = os.path.basename(A_ ) if len(A_ ) > max_length and max_length > 0: UpperCamelCase : List[str] = os.path.dirname(A_ ) UpperCamelCase : Dict = str(hash(A_ ) ) UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(A_ , A_ ) else: return path class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) UpperCamelCase : str = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase : Dict = os.open(self._lock_file , A_ ) except OSError: pass else: try: msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(A_ ) else: UpperCamelCase : Optional[int] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self._lock_file_fd UpperCamelCase : int = None msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 ) os.close(A_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class A__ ( __snake_case ): def __init__( self , A_ , A_=-1 , A_=None ): '''simple docstring''' UpperCamelCase : List[Any] = os.statvfs(os.path.dirname(A_ ) ).f_namemax super().__init__(A_ , timeout=A_ , max_filename_length=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase : Union[str, Any] = os.open(self._lock_file , A_ ) try: fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(A_ ) else: UpperCamelCase : Optional[Any] = fd return None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self._lock_file_fd UpperCamelCase : List[Any] = None fcntl.flock(A_ , fcntl.LOCK_UN ) os.close(A_ ) return None class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase : str = os.open(self._lock_file , A_ ) except OSError: pass else: UpperCamelCase : List[Any] = fd return None def __UpperCamelCase( self ): '''simple docstring''' os.close(self._lock_file_fd ) UpperCamelCase : Optional[int] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __lowerCamelCase : List[Any] = None if msvcrt: __lowerCamelCase : int = WindowsFileLock elif fcntl: __lowerCamelCase : int = UnixFileLock else: __lowerCamelCase : List[Any] = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
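# A short usage sketch for the locks defined above. Because every class in this
# dump is obfuscated to `A__`, the example is written against the upstream
# `filelock` package that this module mirrors; "demo.txt" / "demo.txt.lock" are
# arbitrary illustrative paths.
if __name__ == "__main__":
    from filelock import FileLock

    lock = FileLock("demo.txt.lock", timeout=1)
    with lock:  # acquire() on __enter__ (waiting up to 1s), release() on __exit__
        with open("demo.txt", "a") as f:
            f.write("write guarded by the lock\n")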
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
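# A short usage sketch of the config class above, imported via the public
# `transformers` namespace so it runs standalone: override two
# hyper-parameters and read back a default.
if __name__ == "__main__":
    from transformers import RobertaConfig

    config = RobertaConfig(num_hidden_layers=6, num_attention_heads=12)
    print(config.hidden_size, config.num_hidden_layers)  # 768 6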
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available __lowerCamelCase : Dict = logging.getLogger(__name__) @dataclass class A__ : _UpperCAmelCase :str _UpperCAmelCase :List[str] _UpperCAmelCase :Optional[List[str]] @dataclass class A__ : _UpperCAmelCase :List[int] _UpperCAmelCase :List[int] _UpperCAmelCase :Optional[List[int]] = None _UpperCAmelCase :Optional[List[int]] = None class A__ ( __snake_case ): _UpperCAmelCase :List[str] = 'train' _UpperCAmelCase :Dict = 'dev' _UpperCAmelCase :str = 'test' class A__ : @staticmethod def __UpperCamelCase( A_ , A_ ): '''simple docstring''' raise NotImplementedError @staticmethod def __UpperCamelCase( A_ ): '''simple docstring''' raise NotImplementedError @staticmethod def __UpperCamelCase( A_ , A_ , A_ , A_ , A_=False , A_="[CLS]" , A_=1 , A_="[SEP]" , A_=False , A_=False , A_=0 , A_=0 , A_=-100 , A_=0 , A_=True , ): '''simple docstring''' UpperCamelCase : Optional[Any] = {label: i for i, label in enumerate(A_ )} UpperCamelCase : List[str] = [] for ex_index, example in enumerate(A_ ): if ex_index % 1_0000 == 0: logger.info("Writing example %d of %d" , A_ , len(A_ ) ) UpperCamelCase : Optional[int] = [] UpperCamelCase : Optional[Any] = [] for word, label in zip(example.words , example.labels ): UpperCamelCase : str = tokenizer.tokenize(A_ ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(A_ ) > 0: tokens.extend(A_ ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A_ ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. UpperCamelCase : Tuple = tokenizer.num_special_tokens_to_add() if len(A_ ) > max_seq_length - special_tokens_count: UpperCamelCase : str = tokens[: (max_seq_length - special_tokens_count)] UpperCamelCase : str = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] UpperCamelCase : Optional[int] = [sequence_a_segment_id] * len(A_ ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: UpperCamelCase : Optional[Any] = [cls_token] + tokens UpperCamelCase : Optional[Any] = [pad_token_label_id] + label_ids UpperCamelCase : Any = [cls_token_segment_id] + segment_ids UpperCamelCase : Any = tokenizer.convert_tokens_to_ids(A_ ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. UpperCamelCase : List[str] = [1 if mask_padding_with_zero else 0] * len(A_ ) # Zero-pad up to the sequence length. UpperCamelCase : Tuple = max_seq_length - len(A_ ) if pad_on_left: UpperCamelCase : str = ([pad_token] * padding_length) + input_ids UpperCamelCase : Dict = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask UpperCamelCase : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids UpperCamelCase : int = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(A_ ) == max_seq_length assert len(A_ ) == max_seq_length assert len(A_ ) == max_seq_length assert len(A_ ) == max_seq_length if ex_index < 5: logger.info("*** Example ***" ) logger.info("guid: %s" , example.guid ) logger.info("tokens: %s" , " ".join([str(A_ ) for x in tokens] ) ) logger.info("input_ids: %s" , " ".join([str(A_ ) for x in input_ids] ) ) logger.info("input_mask: %s" , " ".join([str(A_ ) for x in input_mask] ) ) logger.info("segment_ids: %s" , " ".join([str(A_ ) for x in segment_ids] ) ) logger.info("label_ids: %s" , " ".join([str(A_ ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: UpperCamelCase : Tuple = None features.append( InputFeatures( input_ids=A_ , attention_mask=A_ , token_type_ids=A_ , label_ids=A_ ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class A__ ( __snake_case ): _UpperCAmelCase :List[InputFeatures] _UpperCAmelCase :int = nn.CrossEntropyLoss().ignore_index def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ = None , A_=False , A_ = Split.train , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = os.path.join( A_ , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(A_ ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
UpperCamelCase : List[Any] = cached_features_file + ".lock" with FileLock(A_ ): if os.path.exists(A_ ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) UpperCamelCase : Dict = torch.load(A_ ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) UpperCamelCase : List[Any] = token_classification_task.read_examples_from_file(A_ , A_ ) # TODO clean up all this to leverage built-in features of tokenizers UpperCamelCase : int = token_classification_task.convert_examples_to_features( A_ , A_ , A_ , A_ , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features , A_ ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , A_ ): '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class A__ : _UpperCAmelCase :List[InputFeatures] _UpperCAmelCase :int = -1_0_0 def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ = None , A_=False , A_ = Split.train , ): '''simple docstring''' UpperCamelCase : str = token_classification_task.read_examples_from_file(A_ , A_ ) # TODO clean up all this to leverage built-in features of tokenizers UpperCamelCase : str = token_classification_task.convert_examples_to_features( A_ , A_ , A_ , A_ , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: UpperCamelCase : Optional[int] = tf.data.Dataset.from_generator( A_ , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , ( {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: UpperCamelCase : Optional[int] = tf.data.Dataset.from_generator( A_ , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , ( { "input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] ), "token_type_ids": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , A_ ): '''simple docstring''' return self.features[i]
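# A standalone sketch of the label-alignment trick used in
# convert_examples_to_features above: only the first sub-token of each word
# keeps its real label id, and continuation sub-tokens get the ignore index
# (-100, matching nn.CrossEntropyLoss().ignore_index). The toy "tokenizer"
# below is a stand-in, not a real transformers tokenizer.
def align_labels(words, labels, label_map, tokenize, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        word_tokens = tokenize(word)
        if word_tokens:  # some tokenizers return [] for bare whitespace
            tokens.extend(word_tokens)
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids


if __name__ == "__main__":
    toy_tokenize = lambda w: [w[:3], "##" + w[3:]] if len(w) > 3 else [w]
    print(align_labels(["Angela", "lives"], ["B-PER", "O"], {"B-PER": 0, "O": 1}, toy_tokenize))
    # (['Ang', '##ela', 'liv', '##es'], [0, -100, 1, -100])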
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
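# A usage sketch for the pipeline above, imported via the public `diffusers`
# namespace so it runs standalone. "harmonai/maestro-150k" is assumed to be a
# compatible public checkpoint; swap in any unconditional audio-diffusion
# checkpoint you actually have. Generation is slow on CPU.
if __name__ == "__main__":
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
    print(output.audios.shape)  # (batch, channels, samples)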
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , __snake_case , ) class A__ ( __snake_case ): _UpperCAmelCase :str = RobertaConfig _UpperCAmelCase :Any = 'roberta' def __init__( self , A_ ): '''simple docstring''' super().__init__(A_ ) UpperCamelCase : Union[str, Any] = RobertaEmbeddings(A_ ) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , __snake_case , ) class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = RobertaConfig _UpperCAmelCase :List[Any] = 'roberta' def __init__( self , A_ ): '''simple docstring''' super().__init__(A_ ) UpperCamelCase : Any = config.num_labels UpperCamelCase : int = config.num_hidden_layers UpperCamelCase : Union[str, Any] = DeeRobertaModel(A_ ) UpperCamelCase : str = nn.Dropout(config.hidden_dropout_prob ) UpperCamelCase : List[str] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(A_ ) def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=-1 , A_=False , ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.num_layers try: UpperCamelCase : Any = self.roberta( A_ , attention_mask=A_ , token_type_ids=A_ , position_ids=A_ , head_mask=A_ , inputs_embeds=A_ , ) UpperCamelCase : Optional[int] = outputs[1] UpperCamelCase : Tuple = self.dropout(A_ ) UpperCamelCase : str = self.classifier(A_ ) UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: UpperCamelCase : Optional[Any] = e.message UpperCamelCase : Dict = e.exit_layer UpperCamelCase : Optional[Any] = outputs[0] if not self.training: UpperCamelCase : Optional[int] = entropy(A_ ) UpperCamelCase : str = [] UpperCamelCase : Dict = [] if labels is not None: if self.num_labels == 1: # We are doing regression UpperCamelCase : List[Any] = MSELoss() UpperCamelCase : Any = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase : List[str] = CrossEntropyLoss() UpperCamelCase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits UpperCamelCase : Optional[int] = [] for highway_exit in outputs[-1]: UpperCamelCase : Optional[Any] = highway_exit[0] if not self.training: highway_logits_all.append(A_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression UpperCamelCase : Dict = MSELoss() UpperCamelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase : Union[str, Any] = CrossEntropyLoss() UpperCamelCase : str = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(A_ ) if train_highway: UpperCamelCase : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: UpperCamelCase : int = (loss,) + outputs if not self.training: UpperCamelCase : List[str] = outputs + 
((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: UpperCamelCase : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
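# A standalone sketch of the entropy-based early-exit rule this model family
# relies on: a highway exit is taken once the softmax entropy of its logits
# drops below a chosen threshold. Plain torch, independent of the classes
# above; the 0.5 threshold and toy logits are illustrative values only.
import torch


def should_exit(logits: torch.Tensor, threshold: float = 0.5) -> bool:
    probs = torch.softmax(logits, dim=-1)
    entropy = -(probs * torch.log(probs)).sum(dim=-1)
    return bool((entropy < threshold).all())


if __name__ == "__main__":
    confident = torch.tensor([[8.0, 0.1, 0.1]])  # peaked -> low entropy -> exit early
    uncertain = torch.tensor([[1.0, 1.0, 1.0]])  # uniform -> high entropy -> keep going
    print(should_exit(confident), should_exit(uncertain))  # True False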
import functools


def min_edit_distance(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word's index overflows - delete everything left in the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete everything left in the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
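# A quick demonstration of the function above on the classic pair
# "kitten" -> "sitting", whose Levenshtein distance is 3
# (substitute k->s, substitute e->i, insert g).
if __name__ == "__main__":
    print(min_edit_distance("kitten", "sitting"))  # 3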
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , ) -> int: UpperCamelCase : List[str] = {} if train_file is not None: UpperCamelCase : List[Any] = [train_file] if eval_file is not None: UpperCamelCase : Dict = [eval_file] if test_file is not None: UpperCamelCase : Any = [test_file] UpperCamelCase : str = datasets.load_dataset("csv" , data_files=_lowerCAmelCase ) UpperCamelCase : Dict = list(ds[list(files.keys() )[0]].features.keys() ) UpperCamelCase : Union[str, Any] = features_name.pop(_lowerCAmelCase ) UpperCamelCase : str = list(set(ds[list(files.keys() )[0]][label_name] ) ) UpperCamelCase : Any = {label: i for i, label in enumerate(_lowerCAmelCase )} UpperCamelCase : Union[str, Any] = tokenizer.model_input_names UpperCamelCase : Any = {} if len(_lowerCAmelCase ) == 1: for k in files.keys(): UpperCamelCase : Dict = ds[k].map( lambda _lowerCAmelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" ) , batched=_lowerCAmelCase , ) elif len(_lowerCAmelCase ) == 2: for k in files.keys(): UpperCamelCase : Any = ds[k].map( lambda _lowerCAmelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , ) , batched=_lowerCAmelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: UpperCamelCase : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names} UpperCamelCase : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: UpperCamelCase : str = {k: v for k, v in ex.items() if k in input_names} UpperCamelCase : Optional[int] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: UpperCamelCase : str = {k: v for k, v in ex.items() if k in input_names} UpperCamelCase : List[str] = labelaid[ex[label_name]] yield (d, label) UpperCamelCase : Dict = ( tf.data.Dataset.from_generator( _lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: UpperCamelCase : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) UpperCamelCase : Optional[int] = ( tf.data.Dataset.from_generator( _lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: UpperCamelCase : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) UpperCamelCase : Tuple = ( tf.data.Dataset.from_generator( _lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in 
input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: UpperCamelCase : List[str] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __lowerCamelCase : Tuple = logging.getLogger(__name__) @dataclass class A__ : _UpperCAmelCase :int = field(metadata={'help': 'Which column contains the label'} ) _UpperCAmelCase :str = field(default=__snake_case , metadata={'help': 'The path of the training file'} ) _UpperCAmelCase :Optional[str] = field(default=__snake_case , metadata={'help': 'The path of the development file'} ) _UpperCAmelCase :Optional[str] = field(default=__snake_case , metadata={'help': 'The path of the test file'} ) _UpperCAmelCase :int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _UpperCAmelCase :bool = field( default=__snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : _UpperCAmelCase :str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _UpperCAmelCase :bool = field(default=__snake_case , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase :Optional[str] = field( default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def A_ ( ) -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) UpperCamelCase , UpperCamelCase , UpperCamelCase : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ F"""16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) UpperCamelCase : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): UpperCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(_lowerCAmelCase ) -> Dict: UpperCamelCase : List[Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer UpperCamelCase : List[Any] = TFTrainer( model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCamelCase : int = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCamelCase : Optional[Any] = trainer.evaluate() UpperCamelCase : Optional[int] = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(_lowerCAmelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) results.update(_lowerCAmelCase ) return results if __name__ == "__main__": main()
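# Example invocation, assuming this script is saved as
# run_tf_text_classification.py (the upstream transformers example it mirrors)
# and that train.csv / dev.csv are your own files with the label in column 0.
# The argument names follow the upstream dataclass fields that the obfuscated
# fields above stand in for, plus TFTrainingArguments:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv \
#       --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./text_clf_output \
#       --do_train \
#       --do_eval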
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
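# A minimal usage sketch of the extractor under test: featurize one second of
# random 16 kHz audio (a stand-in for real speech) and inspect the output
# shape, which the integration test above pins to (1, 1024, 128).
if __name__ == "__main__":
    import numpy as np
    from transformers import ASTFeatureExtractor

    extractor = ASTFeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second at 16 kHz
    inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(inputs.input_values.shape)  # (1, 1024, 128) = (batch, frames, mel bins)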
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
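# A worked example for the converter above: four kilometers expressed in meters
# (exponent difference 3 - 0, so the value is scaled by 10 ** 3).
if __name__ == "__main__":
    print(length_conversion(4, "kilometer", "meter"))  # 4000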
import pickle

import numpy as np
from matplotlib import pyplot as plt


class A__ :
    def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ):
        '''simple docstring'''
        UpperCamelCase : int = bp_numa
        UpperCamelCase : int = bp_numa
        UpperCamelCase : List[Any] = bp_numa
        UpperCamelCase : Optional[int] = conva_get[:2]
        UpperCamelCase : Optional[Any] = conva_get[2]
        UpperCamelCase : Dict = size_pa
        UpperCamelCase : Union[str, Any] = rate_w
        UpperCamelCase : Dict = rate_t
        UpperCamelCase : Union[str, Any] = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1
        UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1
        UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(A_ , "wb" ) as f:
            pickle.dump(A_ , A_ )

        print(F"""Model saved: {save_path}""" )

    @classmethod
    def __UpperCamelCase( cls , A_ ):
        '''simple docstring'''
        with open(A_ , "rb" ) as f:
            UpperCamelCase : Optional[Any] = pickle.load(A_ )  # noqa: S301

        UpperCamelCase : List[Any] = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" )
        UpperCamelCase : List[Any] = model_dic.get("num_bp1" )
        UpperCamelCase : Dict = model_dic.get("num_bp2" )
        UpperCamelCase : Dict = model_dic.get("num_bp3" )
        UpperCamelCase : Dict = model_dic.get("rate_weight" )
        UpperCamelCase : str = model_dic.get("rate_thre" )
        # create model instance
        UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ )
        # modify model parameter
        UpperCamelCase : str = model_dic.get("w_conv1" )
        UpperCamelCase : Optional[Any] = model_dic.get("wkj" )
        UpperCamelCase : int = model_dic.get("vji" )
        UpperCamelCase : Any = model_dic.get("thre_conv1" )
        UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" )
        UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" )
        return conv_ins

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        return 1 / (1 + np.exp(-1 * x ))

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        return round(A_ , 3 )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
        '''simple docstring'''
        UpperCamelCase : str = convs[0]
        UpperCamelCase : Optional[Any] = convs[1]
        UpperCamelCase : Optional[Any] = np.shape(A_ )[0]
        # get the data slice of original image data, data_focus
        UpperCamelCase : List[str] = []
        for i_focus in range(0 , size_data - size_conv + 1 , A_ ):
            for j_focus in range(0 , size_data - size_conv + 1 , A_ ):
                UpperCamelCase : Union[str, Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(A_ )
        # calculate the feature map of every single kernel, and saved as list of matrix
        UpperCamelCase : int = []
        UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(A_ ):
            UpperCamelCase : str = []
            for i_focus in range(len(A_ ) ):
                UpperCamelCase : List[Any] = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(A_ ) )
            UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape(
                A_ , A_ )
            data_featuremap.append(A_ )

        # expanding the data slice to one dimension
        UpperCamelCase : List[Any] = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(A_ ) )
        UpperCamelCase : Tuple = np.asarray(A_ )
        return focus_list, data_featuremap

    def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ):
        '''simple docstring'''
        UpperCamelCase : Any = len(featuremaps[0] )
        UpperCamelCase : str = int(size_map / size_pooling )
        UpperCamelCase : Optional[int] = []
        for i_map in range(len(A_ ) ):
            UpperCamelCase : Tuple = featuremaps[i_map]
            UpperCamelCase : Any = []
            for i_focus in range(0 , A_ , A_ ):
                for j_focus in range(0 , A_ , A_ ):
                    UpperCamelCase : int = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(A_ ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(A_ ) )
            UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ )
            featuremap_pooled.append(A_ )
        return featuremap_pooled

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = []
        for i in range(len(A_ ) ):
            UpperCamelCase : List[Any] = np.shape(data[i] )
            UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] )
            UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0]
            data_expanded.extend(A_ )
        UpperCamelCase : Any = np.asarray(A_ )
        return data_expanded

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = np.asarray(A_ )
        UpperCamelCase : List[Any] = np.shape(A_ )
        UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
        '''simple docstring'''
        UpperCamelCase : int = []
        UpperCamelCase : Optional[int] = 0
        for i_map in range(A_ ):
            UpperCamelCase : int = np.ones((size_map, size_map) )
            for i in range(0 , A_ , A_ ):
                for j in range(0 , A_ , A_ ):
                    UpperCamelCase : str = pd_pool[
                        i_pool
                    ]
                    UpperCamelCase : str = i_pool + 1
            UpperCamelCase : str = np.multiply(
                A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(A_ )
        return pd_all

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ):
        '''simple docstring'''
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data ", np.shape(A_ )) )
        print((" - - Shape: Teach_Data ", np.shape(A_ )) )
        UpperCamelCase : List[str] = 0
        UpperCamelCase : Union[str, Any] = []
        UpperCamelCase : int = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            UpperCamelCase : Tuple = 0
            print(F"""-------------Learning Time {rp}--------------""" )
            for p in range(len(A_ ) ):
                # print('------------Learning Image: %d--------------'%p)
                UpperCamelCase : Any = np.asmatrix(datas_train[p] )
                UpperCamelCase : List[str] = np.asarray(datas_teach[p] )
                UpperCamelCase , UpperCamelCase : Dict = self.convolute(
                    A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga )
                UpperCamelCase : int = np.shape(A_ )
                UpperCamelCase : List[str] = self._expand(A_ )
                UpperCamelCase : Optional[int] = data_bp_input

                UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa
                UpperCamelCase : Optional[int] = self.sig(A_ )
                UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - self.thre_bpa
                UpperCamelCase : Dict = self.sig(A_ )

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                UpperCamelCase : List[Any] = np.multiply(
                    (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) )
                UpperCamelCase : str = np.multiply(
                    np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) )
                UpperCamelCase : Any = np.dot(A_ , self.vji )
                UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
                UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist()
                UpperCamelCase : List[Any] = self._calculate_gradient_from_pool(
                    A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
                    UpperCamelCase : List[Any] = self.rate_weight * np.dot(A_ , A_ )
                    UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    UpperCamelCase : Dict = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
                UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            UpperCamelCase : Any = rp + 1
            UpperCamelCase : Union[str, Any] = error_count / patterns
            all_mse.append(A_ )

        def draw_error():
            UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(A_ , "+-" )
            plt.plot(A_ , "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(A_ , alpha=0.5 )
            plt.show()

        print("------------------Training Completed---------------------" )
        print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Optional[Any] = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data ", np.shape(A_ )) )
        for p in range(len(A_ ) ):
            UpperCamelCase : int = np.asmatrix(datas_test[p] )
            UpperCamelCase , UpperCamelCase : Any = self.convolute(
                A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga )
            UpperCamelCase : Dict = self._expand(A_ )
            UpperCamelCase : List[Any] = data_bp_input
            UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa
            UpperCamelCase : List[Any] = self.sig(A_ )
            UpperCamelCase : int = bp_outa * self.wkj.T - self.thre_bpa
            UpperCamelCase : Optional[int] = self.sig(A_ )
            produce_out.extend(bp_outa.getA().tolist() )
        UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out]
        return np.asarray(A_ )

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = np.asmatrix(A_ )
        UpperCamelCase , UpperCamelCase : List[Any] = self.convolute(
            A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        UpperCamelCase : str = self.pooling(A_ , self.size_poolinga )
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
52
1
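# Illustrative aside (not part of the sample above): the obfuscated class buries its
# average-pooling step inside a method; this is a standalone sketch of the same idea
# in plain NumPy, with hypothetical names chosen only for readability.
import numpy as np

def average_pool(feature_map: np.ndarray, size: int) -> np.ndarray:
    # split the map into size x size tiles and average each tile
    h, w = feature_map.shape
    out = np.zeros((h // size, w // size))
    for i in range(0, h - size + 1, size):
        for j in range(0, w - size + 1, size):
            out[i // size, j // size] = feature_map[i : i + size, j : j + size].mean()
    return out

# a 4x4 map pooled with size 2 yields a 2x2 map
print(average_pool(np.arange(16, dtype=float).reshape(4, 4), 2))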
def A_ ( _lowerCAmelCase = 50 ) -> int:
    UpperCamelCase : List[Any] = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )


if __name__ == "__main__":
    print(f"""{solution() = }""")
52
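# Illustrative aside: a hedged sanity check for the dynamic programme above. It
# recounts the tilings for a single tile colour with a memoised recursion
# (hypothetical name `ways`), so small rows can be compared by hand.
from functools import lru_cache

@lru_cache(maxsize=None)
def ways(row: int, tile: int) -> int:
    # arrangements of any number of `tile`-length tiles in a row of `row` cells
    if row < tile:
        return 1  # only the empty placement remains
    # either the first cell stays black, or a tile starts here
    return ways(row - 1, tile) + ways(row - tile, tile)

# counts exclude the all-black row, mirroring the `+ 1` accumulation above;
# for a row of 5 this prints 12 (7 red + 3 green + 2 blue tilings)
print(sum(ways(5, t) - 1 for t in (2, 3, 4)))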
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

__lowerCamelCase : Any = {
    """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class A__ ( __snake_case ):
    _UpperCAmelCase :Dict = 'bart'
    _UpperCAmelCase :str = ['past_key_values']
    _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ):
        '''simple docstring'''
        UpperCamelCase : int = vocab_size
        UpperCamelCase : List[Any] = max_position_embeddings
        UpperCamelCase : Any = d_model
        UpperCamelCase : Optional[Any] = encoder_ffn_dim
        UpperCamelCase : List[Any] = encoder_layers
        UpperCamelCase : int = encoder_attention_heads
        UpperCamelCase : Optional[int] = decoder_ffn_dim
        UpperCamelCase : List[str] = decoder_layers
        UpperCamelCase : Optional[int] = decoder_attention_heads
        UpperCamelCase : int = dropout
        UpperCamelCase : int = attention_dropout
        UpperCamelCase : Tuple = activation_dropout
        UpperCamelCase : Tuple = activation_function
        UpperCamelCase : int = init_std
        UpperCamelCase : List[Any] = encoder_layerdrop
        UpperCamelCase : List[str] = decoder_layerdrop
        UpperCamelCase : Dict = classifier_dropout
        UpperCamelCase : Optional[int] = use_cache
        UpperCamelCase : List[Any] = encoder_layers
        UpperCamelCase : int = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ):
            UpperCamelCase : int = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                "The config can simply be saved and uploaded again to be fixed." )


class A__ ( __snake_case ):
    @property
    def __UpperCamelCase( self ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[int] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )

            if self.use_past:
                UpperCamelCase : List[str] = {0: "batch"}
                UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"}
                UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(A_ , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCamelCase : Any = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers
                for i in range(A_ ):
                    UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
                    UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            UpperCamelCase : Optional[Any] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )

        return common_inputs

    @property
    def __UpperCamelCase( self ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Tuple = super().outputs
        else:
            UpperCamelCase : Dict = super(A_ , self ).outputs
            if self.use_past:
                UpperCamelCase , UpperCamelCase : int = self.num_layers
                for i in range(A_ ):
                    UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"}
                    UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_ )

        # Generate decoder inputs
        UpperCamelCase : List[Any] = seq_length if not self.use_past else 1
        UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_ )
        UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        UpperCamelCase : List[Any] = dict(**A_ , **A_ )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape
            UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1]
            UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads
            UpperCamelCase : int = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCamelCase : List[Any] = decoder_seq_length + 3
            UpperCamelCase : str = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            UpperCamelCase : int = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 )

            UpperCamelCase : int = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers
            UpperCamelCase : Any = min(A_ , A_ )
            UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers
            UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(A_ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(A_ ),
                        torch.zeros(A_ ),
                        torch.zeros(A_ ),
                        torch.zeros(A_ ),
                    ) )
            # TODO: test this.
            UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(A_ , A_ ):
                common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        '''simple docstring'''
        UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_ )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            UpperCamelCase : Optional[Any] = seqlen + 2
            UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers
            UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads
            UpperCamelCase : str = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype
            UpperCamelCase : int = torch.cat(
                [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
            UpperCamelCase : Optional[Any] = [
                (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
            ]
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        '''simple docstring'''
        UpperCamelCase : Optional[Any] = compute_effective_axis_dimension(
            A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ )
        UpperCamelCase : int = compute_effective_axis_dimension(
            A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )

        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )

        elif self.task == "causal-lm":
            UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
        else:
            UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )

        return common_inputs

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
        else:
            UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_(
                A_ , A_ , A_ , A_ )
52
1
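# Illustrative aside: a minimal usage sketch for the configuration class defined
# above; the sizes are illustrative, not the `facebook/bart-large` defaults.
from transformers import BartConfig

config = BartConfig(d_model=512, encoder_layers=6, decoder_layers=6)
print(config.hidden_size)  # the attribute_map above routes hidden_size -> d_model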
import argparse

import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor


def A_ ( _lowerCAmelCase ) -> Optional[int]:
    if "cls_token" in name:
        UpperCamelCase : Union[str, Any] = name.replace("cls_token" , "vit.embeddings.cls_token" )
    if "mask_token" in name:
        UpperCamelCase : str = name.replace("mask_token" , "decoder.mask_token" )
    if "decoder_pos_embed" in name:
        UpperCamelCase : List[Any] = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        UpperCamelCase : List[Any] = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        UpperCamelCase : List[Any] = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        UpperCamelCase : str = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
    if "decoder_blocks" in name:
        UpperCamelCase : Any = name.replace("decoder_blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        UpperCamelCase : Any = name.replace("blocks" , "vit.encoder.layer" )
    if "attn.proj" in name:
        UpperCamelCase : Any = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        UpperCamelCase : Union[str, Any] = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        UpperCamelCase : List[str] = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        UpperCamelCase : List[Any] = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        UpperCamelCase : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        UpperCamelCase : List[Any] = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        UpperCamelCase : Any = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        UpperCamelCase : Union[str, Any] = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        UpperCamelCase : List[Any] = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name:
        UpperCamelCase : Dict = name.replace("norm.weight" , "vit.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name:
        UpperCamelCase : List[Any] = name.replace("norm.bias" , "vit.layernorm.bias" )

    return name


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
    for key in orig_state_dict.copy().keys():
        UpperCamelCase : int = orig_state_dict.pop(_lowerCAmelCase )

        if "qkv" in key:
            UpperCamelCase : List[str] = key.split("." )
            UpperCamelCase : List[Any] = int(key_split[1] )
            if "decoder_blocks" in key:
                UpperCamelCase : Optional[Any] = config.decoder_hidden_size
                UpperCamelCase : Union[str, Any] = "decoder.decoder_layers."
                if "weight" in key:
                    UpperCamelCase : List[Any] = val[:dim, :]
                    UpperCamelCase : Tuple = val[dim : dim * 2, :]
                    UpperCamelCase : List[str] = val[-dim:, :]
                elif "bias" in key:
                    UpperCamelCase : Tuple = val[:dim]
                    UpperCamelCase : Union[str, Any] = val[dim : dim * 2]
                    UpperCamelCase : str = val[-dim:]
            else:
                UpperCamelCase : Union[str, Any] = config.hidden_size
                UpperCamelCase : Union[str, Any] = "vit.encoder.layer."
                if "weight" in key:
                    UpperCamelCase : Dict = val[:dim, :]
                    UpperCamelCase : List[Any] = val[dim : dim * 2, :]
                    UpperCamelCase : Any = val[-dim:, :]
                elif "bias" in key:
                    UpperCamelCase : List[str] = val[:dim]
                    UpperCamelCase : Optional[Any] = val[dim : dim * 2]
                    UpperCamelCase : Optional[Any] = val[-dim:]
        else:
            UpperCamelCase : List[str] = val

    return orig_state_dict


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
    UpperCamelCase : Union[str, Any] = ViTMAEConfig()
    if "large" in checkpoint_url:
        UpperCamelCase : Optional[Any] = 1024
        UpperCamelCase : Union[str, Any] = 4096
        UpperCamelCase : List[Any] = 24
        UpperCamelCase : Optional[Any] = 16
    elif "huge" in checkpoint_url:
        UpperCamelCase : List[str] = 14
        UpperCamelCase : Dict = 1280
        UpperCamelCase : str = 5120
        UpperCamelCase : Any = 32
        UpperCamelCase : Any = 16

    UpperCamelCase : Optional[Any] = ViTMAEForPreTraining(_lowerCAmelCase )

    UpperCamelCase : Dict = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"]

    UpperCamelCase : Dict = ViTMAEImageProcessor(size=config.image_size )

    UpperCamelCase : Any = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )

    model.load_state_dict(_lowerCAmelCase )
    model.eval()

    UpperCamelCase : str = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    UpperCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
    UpperCamelCase : List[Any] = ViTMAEImageProcessor(size=config.image_size )
    UpperCamelCase : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors="pt" )

    # forward pass
    torch.manual_seed(2 )
    UpperCamelCase : str = model(**_lowerCAmelCase )
    UpperCamelCase : Union[str, Any] = outputs.logits

    if "large" in checkpoint_url:
        UpperCamelCase : Optional[int] = torch.tensor(
            [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
    elif "huge" in checkpoint_url:
        UpperCamelCase : Optional[Any] = torch.tensor(
            [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
    else:
        UpperCamelCase : int = torch.tensor(
            [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )

    # verify logits
    assert torch.allclose(logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 )

    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(_lowerCAmelCase )

    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(_lowerCAmelCase )


if __name__ == "__main__":
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
        type=str,
        help="""URL of the checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )

    __lowerCamelCase : Optional[int] = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
52
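# Illustrative aside: a hedged sketch of how the converted checkpoint would be
# used afterwards; the output directory name is hypothetical and corresponds to
# whatever --pytorch_dump_folder_path was passed to the script above.
from transformers import ViTMAEForPreTraining, ViTMAEImageProcessor

model = ViTMAEForPreTraining.from_pretrained("./vit-mae-base-converted")
image_processor = ViTMAEImageProcessor.from_pretrained("./vit-mae-base-converted")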
from math import sqrt


def A_ ( _lowerCAmelCase ) -> bool:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
        number >= 0
    ), "'number' must been an int and positive"
    UpperCamelCase : List[Any] = True

    # 0 and 1 are none primes.
    if number <= 1:
        UpperCamelCase : List[Any] = False

    for divisor in range(2 , int(round(sqrt(_lowerCAmelCase ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            UpperCamelCase : Union[str, Any] = False
            break

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'status' must been from type bool"
    return status


def A_ ( _lowerCAmelCase ) -> Any:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    UpperCamelCase : int = list(range(2 , n + 1 ) )

    UpperCamelCase : Optional[int] = []  # this list will be returns.

    # actual sieve of eratosthenes
    for i in range(len(_lowerCAmelCase ) ):
        for j in range(i + 1 , len(_lowerCAmelCase ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                UpperCamelCase : Tuple = 0

    # filters actual prime numbers.
    UpperCamelCase : str = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
    return ans


def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"

    UpperCamelCase : str = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(_lowerCAmelCase ):
            ans.append(_lowerCAmelCase )

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
    return ans


def A_ ( _lowerCAmelCase ) -> Any:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0"

    UpperCamelCase : Optional[Any] = []  # this list will be returns of the function.

    # potential prime number factors.
    UpperCamelCase : Tuple = 2
    UpperCamelCase : str = number

    if number == 0 or number == 1:
        ans.append(_lowerCAmelCase )

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(_lowerCAmelCase ):
        while quotient != 1:
            if is_prime(_lowerCAmelCase ) and (quotient % factor == 0):
                ans.append(_lowerCAmelCase )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(_lowerCAmelCase )

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
    return ans


def A_ ( _lowerCAmelCase ) -> Any:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    UpperCamelCase : List[Any] = 0

    # prime factorization of 'number'
    UpperCamelCase : Any = prime_factorization(_lowerCAmelCase )
    UpperCamelCase : List[Any] = max(_lowerCAmelCase )

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int"
    return ans


def A_ ( _lowerCAmelCase ) -> Union[str, Any]:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    UpperCamelCase : List[Any] = 0

    # prime factorization of 'number'
    UpperCamelCase : Dict = prime_factorization(_lowerCAmelCase )
    UpperCamelCase : List[Any] = min(_lowerCAmelCase )

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int"
    return ans


def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , _lowerCAmelCase ), "compare must been from type bool"

    return number % 2 == 0


def A_ ( _lowerCAmelCase ) -> List[Any]:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , _lowerCAmelCase ), "compare must been from type bool"

    return number % 2 != 0


def A_ ( _lowerCAmelCase ) -> Any:
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (number > 2) and is_even(_lowerCAmelCase )
    ), "'number' must been an int, even and > 2"

    UpperCamelCase : List[str] = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    UpperCamelCase : Dict = get_prime_numbers(_lowerCAmelCase )
    UpperCamelCase : Tuple = len(_lowerCAmelCase )

    # run variable for while-loops.
    UpperCamelCase : Optional[int] = 0
    UpperCamelCase : int = None

    # exit variable. for break up the loops
    UpperCamelCase : Union[str, Any] = True

    while i < len_pn and loop:
        UpperCamelCase : Tuple = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                UpperCamelCase : Any = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and (len(_lowerCAmelCase ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and (numbera >= 0)
        and (numbera >= 0)
    ), "'number1' and 'number2' must been positive integer."

    UpperCamelCase : Tuple = 0

    while numbera != 0:
        UpperCamelCase : Tuple = numbera % numbera
        UpperCamelCase : Any = numbera
        UpperCamelCase : Union[str, Any] = rest

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must been positive integer."

    UpperCamelCase : Optional[int] = 1  # actual answer that will be return.

    # for kgV (x,1)
    if numbera > 1 and numbera > 1:
        # builds the prime factorization of 'number1' and 'number2'
        UpperCamelCase : List[Any] = prime_factorization(_lowerCAmelCase )
        UpperCamelCase : Union[str, Any] = prime_factorization(_lowerCAmelCase )

    elif numbera == 1 or numbera == 1:
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : int = []
        UpperCamelCase : List[Any] = max(_lowerCAmelCase , _lowerCAmelCase )

    UpperCamelCase : Optional[int] = 0
    UpperCamelCase : Tuple = 0

    UpperCamelCase : List[str] = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_a:
                UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase )
                UpperCamelCase : Tuple = prime_fac_a.count(_lowerCAmelCase )

                for _ in range(max(_lowerCAmelCase , _lowerCAmelCase ) ):
                    ans *= n

            else:
                UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase )

                for _ in range(_lowerCAmelCase ):
                    ans *= n

            done.append(_lowerCAmelCase )

    # iterates through primeFac2
    for n in prime_fac_a:
        if n not in done:
            UpperCamelCase : Any = prime_fac_a.count(_lowerCAmelCase )

            for _ in range(_lowerCAmelCase ):
                ans *= n

            done.append(_lowerCAmelCase )

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def A_ ( _lowerCAmelCase ) -> Tuple:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'number' must been a positive int"

    UpperCamelCase : int = 0
    UpperCamelCase : int = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(_lowerCAmelCase ):
            ans += 1

    # precondition
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and is_prime(
        _lowerCAmelCase ), "'ans' must been a prime number and from type int"
    return ans


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
    assert (
        is_prime(_lowerCAmelCase ) and is_prime(_lowerCAmelCase ) and (p_number_a < p_number_a)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    UpperCamelCase : str = p_number_a + 1  # jump to the next number

    UpperCamelCase : Dict = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(_lowerCAmelCase ):
        number += 1

    while number < p_number_a:
        ans.append(_lowerCAmelCase )

        number += 1

        # fetch the next prime number.
        while not is_prime(_lowerCAmelCase ):
            number += 1

    # precondition
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and ans[0] != p_number_a
        and ans[len(_lowerCAmelCase ) - 1] != p_number_a
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def A_ ( _lowerCAmelCase ) -> List[str]:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1"

    UpperCamelCase : Dict = []  # will be returned.

    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(_lowerCAmelCase )

    # precondition
    assert ans[0] == 1 and ans[len(_lowerCAmelCase ) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def A_ ( _lowerCAmelCase ) -> int:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
        number > 1
    ), "'number' must been an int and >= 1"

    UpperCamelCase : int = get_divisors(_lowerCAmelCase )

    # precondition
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and (divisors[0] == 1)
        and (divisors[len(_lowerCAmelCase ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number


def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    UpperCamelCase : List[str] = gcd(abs(_lowerCAmelCase ) , abs(_lowerCAmelCase ) )

    # precondition
    assert (
        isinstance(_lowerCAmelCase , _lowerCAmelCase )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def A_ ( _lowerCAmelCase ) -> Dict:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"

    UpperCamelCase : str = 1  # this will be return.

    for factor in range(1 , n + 1 ):
        ans *= factor

    return ans


def A_ ( _lowerCAmelCase ) -> Tuple:
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"

    UpperCamelCase : Dict = 0
    UpperCamelCase : Dict = 1
    UpperCamelCase : Union[str, Any] = 1  # this will be return

    for _ in range(n - 1 ):
        UpperCamelCase : Any = ans
        ans += fiba
        UpperCamelCase : str = tmp

    return ans
52
1
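# Illustrative aside: a self-contained restatement of the prime-factorisation
# helper above with readable (hypothetical) names, handy for spot-checking.
def prime_factors(n: int) -> list[int]:
    factors, divisor = [], 2
    while divisor * divisor <= n:
        while n % divisor == 0:  # peel off each prime factor completely
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:  # whatever remains is itself prime
        factors.append(n)
    return factors

print(prime_factors(287))  # [7, 41]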
class A__ :
    def __init__( self , A_ , A_=None , A_=None ):
        '''simple docstring'''
        UpperCamelCase : List[str] = data
        UpperCamelCase : Optional[int] = previous
        UpperCamelCase : Optional[Any] = next_node

    def __str__( self ):
        '''simple docstring'''
        return F"""{self.data}"""

    def __UpperCamelCase( self ):
        '''simple docstring'''
        return self.data

    def __UpperCamelCase( self ):
        '''simple docstring'''
        return self.next

    def __UpperCamelCase( self ):
        '''simple docstring'''
        return self.previous


class A__ :
    def __init__( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Optional[Any] = head

    def __iter__( self ):
        '''simple docstring'''
        return self

    def __UpperCamelCase( self ):
        '''simple docstring'''
        if not self.current:
            raise StopIteration
        else:
            UpperCamelCase : Optional[Any] = self.current.get_data()
            UpperCamelCase : List[str] = self.current.get_next()
            return value


class A__ :
    def __init__( self ):
        '''simple docstring'''
        UpperCamelCase : Any = None  # First node in list
        UpperCamelCase : Optional[int] = None  # Last node in list

    def __str__( self ):
        '''simple docstring'''
        UpperCamelCase : str = self.head
        UpperCamelCase : Tuple = []
        while current is not None:
            nodes.append(current.get_data() )
            UpperCamelCase : Dict = current.get_next()
        return " ".join(str(A_ ) for node in nodes )

    def __contains__( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : int = self.head
        while current:
            if current.get_data() == value:
                return True
            UpperCamelCase : Tuple = current.get_next()
        return False

    def __iter__( self ):
        '''simple docstring'''
        return LinkedListIterator(self.head )

    def __UpperCamelCase( self ):
        '''simple docstring'''
        if self.head:
            return self.head.get_data()
        return None

    def __UpperCamelCase( self ):
        '''simple docstring'''
        if self.tail:
            return self.tail.get_data()
        return None

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        if self.head is None:
            UpperCamelCase : List[Any] = node
            UpperCamelCase : Union[str, Any] = node
        else:
            self.insert_before_node(self.head , A_ )

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        if self.head is None:
            self.set_head(A_ )
        else:
            self.insert_after_node(self.tail , A_ )

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = Node(A_ )
        if self.head is None:
            self.set_head(A_ )
        else:
            self.set_tail(A_ )

    def __UpperCamelCase( self , A_ , A_ ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = node
        UpperCamelCase : List[Any] = node.previous

        if node.get_previous() is None:
            UpperCamelCase : str = node_to_insert
        else:
            UpperCamelCase : Dict = node_to_insert

        UpperCamelCase : Optional[Any] = node_to_insert

    def __UpperCamelCase( self , A_ , A_ ):
        '''simple docstring'''
        UpperCamelCase : int = node
        UpperCamelCase : Optional[Any] = node.next

        if node.get_next() is None:
            UpperCamelCase : List[str] = node_to_insert
        else:
            UpperCamelCase : int = node_to_insert

        UpperCamelCase : Union[str, Any] = node_to_insert

    def __UpperCamelCase( self , A_ , A_ ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = Node(A_ )
        UpperCamelCase : Optional[int] = self.head
        while node:
            if current_position == position:
                self.insert_before_node(A_ , A_ )
                return
            current_position += 1
            UpperCamelCase : Union[str, Any] = node.next
        self.insert_after_node(self.tail , A_ )

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = self.head
        while node:
            if node.get_data() == item:
                return node
            UpperCamelCase : int = node.get_next()
        raise Exception("Node not found" )

    def __UpperCamelCase( self , A_ ):
        '''simple docstring'''
        if (node := self.get_node(A_ )) is not None:
            if node == self.head:
                UpperCamelCase : Optional[Any] = self.head.get_next()

            if node == self.tail:
                UpperCamelCase : Dict = self.tail.get_previous()

            self.remove_node_pointers(A_ )

    @staticmethod
    def __UpperCamelCase( A_ ):
        '''simple docstring'''
        if node.get_next():
            UpperCamelCase : List[str] = node.previous

        if node.get_previous():
            UpperCamelCase : Dict = node.next

        UpperCamelCase : Any = None
        UpperCamelCase : Optional[Any] = None

    def __UpperCamelCase( self ):
        '''simple docstring'''
        return self.head is None


def A_ ( ) -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
52
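# Illustrative aside: the three classes above all carry the obfuscated name `A__`;
# this sketch shows the intended usage pattern with hypothetical, disambiguated names.
class Node:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

class DoublyLinkedList:
    def __init__(self):
        self.head = self.tail = None

    def insert_at_tail(self, data):
        node = Node(data)
        if self.tail is None:
            self.head = self.tail = node
        else:
            node.previous, self.tail.next = self.tail, node
            self.tail = node

    def __iter__(self):
        current = self.head
        while current:
            yield current.data
            current = current.next

dll = DoublyLinkedList()
for value in (1, 2, 3):
    dll.insert_at_tail(value)
print(list(dll))  # [1, 2, 3]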
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : str = """src/transformers"""


# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS)

__lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")


__lowerCamelCase : List[str] = {
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}


def A_ ( _lowerCAmelCase ) -> List[str]:
    UpperCamelCase : Optional[Any] = None

    # source code of `config_class`
    UpperCamelCase : Tuple = inspect.getsource(_lowerCAmelCase )
    UpperCamelCase : Optional[Any] = _re_checkpoint.findall(_lowerCAmelCase )

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/" ):
            UpperCamelCase : Dict = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        UpperCamelCase : Any = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            UpperCamelCase : List[Any] = ckpt_name
            break

    return checkpoint


def A_ ( ) -> List[str]:
    UpperCamelCase : Optional[int] = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        UpperCamelCase : Union[str, Any] = get_checkpoint_from_config_class(_lowerCAmelCase )

        UpperCamelCase : Optional[int] = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(_lowerCAmelCase )

    if len(_lowerCAmelCase ) > 0:
        UpperCamelCase : Any = "\n".join(sorted(_lowerCAmelCase ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
52
1
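# Illustrative aside: a tiny demonstration of the `_re_checkpoint` pattern defined
# above, run on a docstring fragment of the documented form.
import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
text = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(pattern.findall(text))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]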
import os
import sys


__lowerCamelCase : Any = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


__lowerCamelCase : Dict = [
    """torch""",
    """numpy""",
    """tokenizers""",
    """filelock""",
    """requests""",
    """tqdm""",
    """regex""",
    """sentencepiece""",
    """sacremoses""",
    """importlib_metadata""",
    """huggingface_hub""",
]


@add_start_docstrings(AutoConfig.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
    return AutoConfig.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )


@add_start_docstrings(AutoTokenizer.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
    return AutoTokenizer.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )


@add_start_docstrings(AutoModel.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
    return AutoModel.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
    return AutoModelForCausalLM.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
    return AutoModelForMaskedLM.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
    return AutoModelForSequenceClassification.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
    return AutoModelForQuestionAnswering.from_pretrained(*_lowerCAmelCase , **_lowerCAmelCase )
52
from __future__ import annotations

from functools import lru_cache
from math import ceil


__lowerCamelCase : str = 100

__lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
__lowerCamelCase : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100 )
def A_ ( _lowerCAmelCase ) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    UpperCamelCase : set[int] = set()
    UpperCamelCase : int
    UpperCamelCase : int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )

    return ret


def A_ ( _lowerCAmelCase = 5000 ) -> int | None:
    for number_to_partition in range(1 , _lowerCAmelCase ):
        if len(partition(_lowerCAmelCase ) ) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
52
1
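# Illustrative aside: a hedged restatement of the memoised recursion above with
# plain names; it enumerates the distinct products of prime summands of one input.
from functools import lru_cache

PRIMES = (2, 3, 5, 7)  # enough primes for the small demo below

@lru_cache(maxsize=None)
def prime_products(n: int) -> frozenset:
    if n == 0:
        return frozenset({1})
    out = set()
    for p in PRIMES:
        if p <= n:
            out.update(sub * p for sub in prime_products(n - p))
    return frozenset(out)

print(sorted(prime_products(10)))  # [21, 25, 30, 32, 36]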
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
def A_ ( _lowerCAmelCase ) -> str:
    UpperCamelCase : Optional[int] = int(_lowerCAmelCase )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(_lowerCAmelCase )
    UpperCamelCase , UpperCamelCase : Dict = divmod(_lowerCAmelCase , 2 )
    return binary_recursive(_lowerCAmelCase ) + str(_lowerCAmelCase )


def A_ ( _lowerCAmelCase ) -> str:
    UpperCamelCase : Tuple = str(_lowerCAmelCase ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    UpperCamelCase : Optional[int] = "-" if number.startswith("-" ) else ""
    UpperCamelCase : Any = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return F"""{negative}0b{binary_recursive(int(_lowerCAmelCase ) )}"""


if __name__ == "__main__":
    from doctest import testmod

    testmod()
52
1
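# Illustrative aside: both functions above were renamed to `A_` by the style pass,
# so the internal call to `binary_recursive` no longer resolves; this is a minimal
# working restatement with hypothetical names.
def to_binary(number: str) -> str:
    number = str(number).strip()
    negative = "-" if number.startswith("-") else ""
    value = int(number.lstrip("-"))
    return f"{negative}0b{value:b}"

print(to_binary("-40"))  # -0b101000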
import pprint

import requests


__lowerCamelCase : Tuple = """https://zenquotes.io/api"""


def A_ ( ) -> list:
    return requests.get(API_ENDPOINT_URL + "/today" ).json()


def A_ ( ) -> list:
    return requests.get(API_ENDPOINT_URL + "/random" ).json()


if __name__ == "__main__":
    __lowerCamelCase : Optional[int] = random_quotes()
    pprint.pprint(response)
52
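# Illustrative aside: a defensive usage sketch for the helpers above; zenquotes.io
# rate-limits unauthenticated callers, so a timeout and status check are prudent.
# The `q` (quote text) key is assumed from the API's documented response shape.
import requests

response = requests.get("https://zenquotes.io/api/random", timeout=10)
response.raise_for_status()
print(response.json()[0].get("q"))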
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class A__ :
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = parent
        UpperCamelCase : List[Any] = batch_size
        UpperCamelCase : Dict = seq_length
        UpperCamelCase : Tuple = is_training
        UpperCamelCase : Union[str, Any] = use_input_mask
        UpperCamelCase : Tuple = use_token_type_ids
        UpperCamelCase : Optional[Any] = use_labels
        UpperCamelCase : str = vocab_size
        UpperCamelCase : Optional[int] = hidden_size
        UpperCamelCase : Any = num_hidden_layers
        UpperCamelCase : Optional[Any] = num_attention_heads
        UpperCamelCase : Optional[Any] = intermediate_size
        UpperCamelCase : Optional[Any] = hidden_act
        UpperCamelCase : Union[str, Any] = hidden_dropout_prob
        UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
        UpperCamelCase : List[Any] = max_position_embeddings
        UpperCamelCase : str = type_vocab_size
        UpperCamelCase : Optional[int] = type_sequence_label_size
        UpperCamelCase : Dict = initializer_range
        UpperCamelCase : int = num_labels
        UpperCamelCase : Optional[int] = scope
        UpperCamelCase : int = range_bbox

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCamelCase : Union[str, Any] = bbox[i, j, 3]
                    UpperCamelCase : int = bbox[i, j, 1]
                    UpperCamelCase : int = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCamelCase : List[str] = bbox[i, j, 2]
                    UpperCamelCase : Optional[int] = bbox[i, j, 0]
                    UpperCamelCase : Optional[Any] = t

        UpperCamelCase : Dict = None
        if self.use_input_mask:
            UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        UpperCamelCase : str = None
        if self.use_token_type_ids:
            UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        UpperCamelCase : Dict = None
        UpperCamelCase : int = None
        if self.use_labels:
            UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        UpperCamelCase : List[Any] = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def __UpperCamelCase( self ):
        '''simple docstring'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
        '''simple docstring'''
        UpperCamelCase : Any = LiltModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ )
        UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ )
        UpperCamelCase : Any = model(A_ , bbox=A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
        '''simple docstring'''
        UpperCamelCase : Any = self.num_labels
        UpperCamelCase : Dict = LiltForTokenClassification(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Dict = model(
            A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
        '''simple docstring'''
        UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[str] = model(
            A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Any = self.prepare_config_and_inputs()
        (
            ( UpperCamelCase ) ,
            ( UpperCamelCase ) ,
            ( UpperCamelCase ) ,
            ( UpperCamelCase ) ,
            ( UpperCamelCase ) ,
            ( UpperCamelCase ) ,
            ( UpperCamelCase ) ,
        ) : Tuple = config_and_inputs
        UpperCamelCase : Tuple = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    _UpperCAmelCase :Union[str, Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase :Dict = False
    _UpperCAmelCase :Union[str, Any] = False

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
        '''simple docstring'''
        return True

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = LiltModelTester(self )
        UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def __UpperCamelCase( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )

    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )

    @slow
    def __UpperCamelCase( self ):
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : Dict = LiltModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )


@require_torch
@slow
class A__ ( unittest.TestCase ):
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ )

        UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ )
        UpperCamelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ )

        # forward pass
        with torch.no_grad():
            UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ )

        UpperCamelCase : List[str] = torch.Size([1, 2, 768] )
        UpperCamelCase : Any = torch.tensor(
            [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , )

        self.assertTrue(outputs.last_hidden_state.shape , A_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
52
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCamelCase : str = {
    """configuration_instructblip""": [
        """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """InstructBlipConfig""",
        """InstructBlipQFormerConfig""",
        """InstructBlipVisionConfig""",
    ],
    """processing_instructblip""": ["""InstructBlipProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Any = [
        """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """InstructBlipQFormerModel""",
        """InstructBlipPreTrainedModel""",
        """InstructBlipForConditionalGeneration""",
        """InstructBlipVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
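# Illustrative aside: a stripped-down, assumption-laden sketch of the deferred-import
# pattern that `_LazyModule` implements for the structure above, standard library only.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        # import the defining submodule only on first attribute access
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)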
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
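# Minimal end-to-end sketch of the index API exercised above (assumes
# `datasets` and `faiss-cpu` are installed; the dataset contents are
# illustrative, not from the tests).
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"label": [f"item_{i}" for i in range(10)]})
ds = ds.map(lambda ex, i: {"vecs": i * np.ones(4, dtype=np.float32)}, with_indices=True)
ds = ds.add_faiss_index("vecs")  # default index is an exact (flat) L2 index
scores, examples = ds.get_nearest_examples("vecs", np.ones(4, dtype=np.float32))
print(examples["label"][0])  # "item_1": the row whose vector is exactly all-ones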
# Imports
import numpy as np


class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
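# Example usage: every index is computed elementwise on numpy arrays, so each
# band is simply a matrix of reflectance values (the values here are made up).
import numpy as np

red = np.array([[0.20, 0.30], [0.25, 0.10]])
nir = np.array([[0.60, 0.70], [0.65, 0.40]])
cl = IndexCalculation(red=red, nir=nir)
print(cl.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)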
def solution(length: int = 50) -> int:
    """
    Counts the tilings of a row of the given length using coloured tiles of
    length 2, 3 or 4 (one colour per tile length, colours never mixed), with
    at least one coloured tile placed.
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
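# Sanity check: this is the classic "red (length 2), green (3), blue (4) tile"
# counting problem; it matches Project Euler 116, whose statement gives
# 7 + 3 + 2 = 12 tilings for a row of length 5.
assert solution(5) == 12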
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
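# Example (a sketch): configs are plain containers, and `attribute_map` lets
# generic code read `hidden_size` even though the stored field is `n_embd`.
config = TrajectoryTransformerConfig(n_embd=256)
print(config.hidden_size)        # 256, resolved through attribute_map
print(config.num_hidden_layers)  # 4, i.e. the default n_layer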
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
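# Round-trip example (a sketch; key and message are arbitrary): the generated
# mapping is a bijection on A-Z and leaves non-letters alone, so deciphering
# an enciphered message recovers the uppercased original.
cipher_map = create_cipher_map("Goodbye!!")
secret = encipher("Hello World!!", cipher_map)
assert decipher(secret, cipher_map) == "HELLO WORLD!!"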
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 list of the given shape"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
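# Minimal usage sketch (requires torch and torchaudio, which back the
# spectrogram computation): a raw 1-D waveform becomes a fixed-size
# (1024, 128) log-mel feature matrix, matching the shape asserted above.
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)  # one second of fake audio at 16 kHz
features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(features.input_values.shape)  # (1, 1024, 128)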
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
           and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
           and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
           Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
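# Programmatic equivalent of the shell entry point (a sketch; the module path
# `diffusers.commands.diffusers_cli` is an assumption based on where this
# file lives in the diffusers package):
import sys
from diffusers.commands.diffusers_cli import main

sys.argv = ["diffusers-cli", "env"]  # simulate running `diffusers-cli env` from a shell
main()  # prints environment diagnostics via EnvironmentCommand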
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")

        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
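# Typical driver code for the benchmark class above (a sketch; the model name
# and sizes are illustrative, and a TensorFlow install is assumed):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8], memory=False
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # aggregates inference speed results per model/size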
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set,
        with rank 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
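# Example: three singleton sets merged pairwise; `max_set` tracks the size of
# the largest component seen so far.
disjoint_set = DisjointSet([1, 1, 1])
disjoint_set.merge(0, 1)
disjoint_set.merge(1, 2)
print(disjoint_set.max_set)  # 3
print(disjoint_set.get_parent(0) == disjoint_set.get_parent(2))  # True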
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass 
else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
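# A short, hedged sketch of what the `_import_structure` / `_LazyModule`
# pattern above buys: `import transformers` stays cheap, and the torch ALBERT
# module is only loaded when one of its names is first imported or accessed.
# Assumes `transformers` and `torch` are installed.
from transformers import AlbertConfig, AlbertModel  # this import line triggers the lazy torch load

config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
model = AlbertModel(config)
print(model.config.model_type)  # -> "albert"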
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
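# Quick usage sketch for the config above (assumes it is exposed as
# `transformers.ConvNextV2Config`): the stage names are derived from `depths`,
# and `out_features` is validated against them by the backbone mixin.
from transformers import ConvNextV2Config

cfg = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320], out_features=["stage4"])
print(cfg.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(cfg.out_features)  # ['stage4']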
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: UpperCamelCase : Optional[int] = tmp_path / "cache" UpperCamelCase : Any = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase : Any = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : int = tmp_path / "cache" UpperCamelCase : int = {"text": "string"} UpperCamelCase : List[str] = features.copy() if features else default_expected_features UpperCamelCase : Optional[Any] = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase : Union[str, Any] = TextDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = tmp_path / "cache" UpperCamelCase : Optional[Any] = {"text": "string"} UpperCamelCase : List[str] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : str = text_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : Tuple = [text_path] UpperCamelCase : Union[str, Any] = tmp_path / "cache" UpperCamelCase : Any = {"text": "string"} UpperCamelCase : Optional[Any] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=("train",) ) -> str: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) for split in splits: UpperCamelCase : Optional[int] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : int = tmp_path / "cache" UpperCamelCase : int = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else 
assert_arrow_memory_doesnt_increase(): UpperCamelCase : Union[str, Any] = TextDatasetReader({"train": text_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : Tuple = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" UpperCamelCase : Optional[Any] = {"text": "string"} UpperCamelCase : Dict = features.copy() if features else default_expected_features UpperCamelCase : Tuple = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase : Optional[int] = TextDatasetReader({"train": text_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: if split: UpperCamelCase : int = {split: text_path} else: UpperCamelCase : Optional[int] = "train" UpperCamelCase : str = {"train": text_path, "test": text_path} UpperCamelCase : int = tmp_path / "cache" UpperCamelCase : int = {"text": "string"} UpperCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
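# A minimal end-to-end run of the reader exercised by the tests above
# (a sketch; `TextDatasetReader` is an internal `datasets` helper, and
# `datasets.load_dataset("text", data_files=...)` is the public equivalent):
import pathlib
import tempfile

from datasets.io.text import TextDatasetReader

with tempfile.TemporaryDirectory() as tmp:
    txt = pathlib.Path(tmp) / "data.txt"
    txt.write_text("foo\nbar\nfoobar\nbaz\n")
    ds = TextDatasetReader(str(txt), cache_dir=tmp).read()
    print(ds.num_rows, ds.column_names)  # 4 ['text']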
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
__lowerCamelCase : List[str] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ __lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}] __lowerCamelCase : int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import hashlib  # hashlib is only used inside the test function below
import struct


class SHA1Hash:
    """Contains the whole pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # Left-rotate a 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        # Pad the message to a multiple of 64 bytes, appending the bit length
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack(">Q", 8 * len(self.data))

    def split_blocks(self) -> list:
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block: bytes) -> list:
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
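# Spot-check of the pure-Python implementation against the standard library on
# a second, classic test vector (a sketch; runs only when executed directly):
if __name__ == "__main__":
    fox = b"The quick brown fox jumps over the lazy dog"
    assert SHA1Hash(fox).final_hash() == hashlib.sha1(fox).hexdigest()
    print(SHA1Hash(fox).final_hash())  # 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12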
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, 
Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCamelCase : Tuple = 1 UpperCamelCase : Tuple = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = ssent.split(" " ) UpperCamelCase : Dict = csent.split(" " ) UpperCamelCase : str = [] UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : str = [] UpperCamelCase : str = [] UpperCamelCase : Dict = [] UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [] UpperCamelCase : Tuple = [] for rsent in rsents: UpperCamelCase : List[Any] = rsent.split(" " ) UpperCamelCase : List[str] = [] UpperCamelCase : int = [] UpperCamelCase : Tuple = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCamelCase : str = 
sum([delascore, delascore, delascore, delascore] ) / 4 UpperCamelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCamelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: UpperCamelCase : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCamelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: UpperCamelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": UpperCamelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase ) elif tokenizer == "penn": UpperCamelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase ) else: UpperCamelCase : Union[str, Any] = sentence if not return_str: UpperCamelCase : Tuple = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError("Sources length must match predictions and references lengths." 
) UpperCamelCase : Optional[Any] = 0 for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] ) UpperCamelCase : Optional[int] = sari_score / len(_lowerCAmelCase ) return 100 * sari_score def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]: UpperCamelCase : Optional[Any] = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] UpperCamelCase : Tuple = sacrebleu.corpus_bleu( _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = {} result.update({"sari": compute_sari(sources=A_ , predictions=A_ , references=A_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=A_ , references=A_ )} ) result.update({"exact": compute_em(predictions=A_ , references=A_ )} ) return result
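# Usage sketch mirroring the docstring example above (requires the
# `sacrebleu` and `sacremoses` packages; `load_metric` is the legacy
# `datasets` entry point for metric scripts like this one):
import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(sorted(results))  # ['exact', 'sacrebleu', 'sari']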
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCamelCase : Tuple = logging.get_logger(__name__) def A_ ( _lowerCAmelCase ) -> List[List[ImageInput]]: if isinstance(_lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_lowerCAmelCase ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = ['pixel_values'] def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : List[Any] = size if size is not None else {"shortest_edge": 224} UpperCamelCase : int = get_size_dict(A_ , default_to_square=A_ ) UpperCamelCase : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCamelCase : Optional[Any] = get_size_dict(A_ , param_name="crop_size" ) UpperCamelCase : Any = do_resize UpperCamelCase : int = size UpperCamelCase : Tuple = do_center_crop UpperCamelCase : str = crop_size UpperCamelCase : Tuple = resample UpperCamelCase : str = do_rescale UpperCamelCase : Tuple = rescale_factor UpperCamelCase : int = do_normalize UpperCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCamelCase( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : int = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" in size: UpperCamelCase : str = get_resize_output_image_size(A_ , size["shortest_edge"] , default_to_square=A_ ) elif "height" in size and "width" in size: UpperCamelCase : Any = (size["height"], size["width"]) else: raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Optional[Any] = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(A_ , size=(size["height"], size["width"]) , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCamelCase : int = to_numpy_array(A_ ) if do_resize: UpperCamelCase : str = self.resize(image=A_ , size=A_ , resample=A_ ) if do_center_crop: UpperCamelCase : Dict = self.center_crop(A_ , size=A_ ) if do_rescale: UpperCamelCase : int = self.rescale(image=A_ , scale=A_ ) if do_normalize: UpperCamelCase : Union[str, Any] = self.normalize(image=A_ , mean=A_ , std=A_ ) UpperCamelCase : List[Any] = to_channel_dimension_format(A_ , A_ ) return image def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ): '''simple docstring''' UpperCamelCase : int = do_resize if do_resize is not None else self.do_resize UpperCamelCase : int = resample if resample is not None else self.resample UpperCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase : int = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase : int = image_mean if image_mean is not None else self.image_mean UpperCamelCase : Any = image_std if image_std is not None else self.image_std UpperCamelCase : List[Any] = size if size is not None else self.size UpperCamelCase : Optional[Any] = get_size_dict(A_ , default_to_square=A_ ) UpperCamelCase : List[Any] = crop_size if crop_size is not None else self.crop_size UpperCamelCase : List[Any] = get_size_dict(A_ , param_name="crop_size" ) if not valid_images(A_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) UpperCamelCase : List[str] = make_batched(A_ ) UpperCamelCase : Tuple = [ [ self._preprocess_image( image=A_ , do_resize=A_ , size=A_ , resample=A_ , do_center_crop=A_ , crop_size=A_ , do_rescale=A_ , rescale_factor=A_ , do_normalize=A_ , image_mean=A_ , image_std=A_ , data_format=A_ , ) for img in video ] for video in videos ] UpperCamelCase : str = {"pixel_values": videos} return BatchFeature(data=A_ , tensor_type=A_ )
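# A minimal sketch of running the processor above on a dummy clip (hedged:
# this file matches the shape of `VideoMAEImageProcessor`; the output shape
# assumes the defaults of shortest_edge=224 resize plus a 224x224 center crop):
import numpy as np
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 16, 3, 224, 224)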
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
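# Usage sketch: the ONNX input axes depend on the task, exactly as implemented
# in the `inputs` property above.
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

config = RobertaConfig()  # roberta-base style defaults (50265 vocab, 12 layers)
onnx_config = RobertaOnnxConfig(config, task="multiple-choice")
print(onnx_config.inputs["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}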
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: c = sqrt(K / rho), where K is the bulk modulus of
    the fluid and rho its density.

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 2)
    1467.76
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
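# Worked examples with approximate textbook fluid properties (the figures
# below are assumptions for illustration, not values from this module):
if __name__ == "__main__":
    # water at ~20 C: rho ~ 998 kg/m^3, K ~ 2.15 GPa  ->  ~1.47 km/s
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))
    # mercury: rho ~ 13_600 kg/m^3, K ~ 28.5 GPa      ->  ~1.45 km/s
    print(speed_of_sound_in_a_fluid(density=13_600, bulk_modulus=28.5e9))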
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class A__ ( __snake_case ): def __init__( self , A_ , A_ ): '''simple docstring''' super().__init__() self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self , A_ = 1 , A_ = 100 , A_ = None , A_ = None , A_ = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCamelCase : str = self.unet.config.sample_size / self.unet.config.sample_rate UpperCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate UpperCamelCase : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCamelCase : Union[str, Any] = int(A_ ) if sample_size % down_scale_factor != 0: UpperCamelCase : List[str] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" " process." ) UpperCamelCase : Any = int(A_ ) UpperCamelCase : Union[str, Any] = next(iter(self.unet.parameters() ) ).dtype UpperCamelCase : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(A_ , A_ ) and len(A_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCamelCase : Optional[Any] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ ) # set step values self.scheduler.set_timesteps(A_ , device=audio.device ) UpperCamelCase : Optional[int] = self.scheduler.timesteps.to(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase : Dict = self.unet(A_ , A_ ).sample # 2. compute previous image: x_t -> t_t-1 UpperCamelCase : int = self.scheduler.step(A_ , A_ , A_ ).prev_sample UpperCamelCase : Optional[Any] = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCamelCase : Dict = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=A_ )
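# Usage sketch (hedged: this file has the shape of diffusers'
# `DanceDiffusionPipeline`, and "harmonai/maestro-150k" is one public
# checkpoint trained for it):
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe(audio_length_in_s=4.0, num_inference_steps=50)
print(output.audios.shape)  # (batch, channels, samples)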
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Dict = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys __lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Compute the Levenshtein edit distance between word1 and word2 using
    top-down dynamic programming with memoization.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
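# Two classic sanity checks for the memoized recursion above:
if __name__ == "__main__":
    assert min_distance_up_bottom("intention", "execution") == 5  # classic NLP example
    assert min_distance_up_bottom("kitten", "sitting") == 3  # substitute, substitute, insert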
import os


def solution():
    """
    Find the maximum total from top to bottom of the triangle in triangle.txt
    by accumulating, row by row, the best path sum that can reach each cell.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
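# The same accumulation on the four-row example triangle from the problem
# statement; the best path 3 -> 7 -> 4 -> 9 sums to 23.
if __name__ == "__main__":
    small = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(small)):
        for j in range(len(small[i])):
            above_right = small[i - 1][j] if j != len(small[i - 1]) else 0
            above_left = small[i - 1][j - 1] if j > 0 else 0
            small[i][j] += max(above_right, above_left)
    assert max(small[-1]) == 23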
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
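# Minimal standalone usage of the feature extractor under test (a sketch; the
# defaults pad/truncate to 1024 frames of 128 mel bins, matching the shape
# asserted in the integration test above):
import numpy as np
from transformers import ASTFeatureExtractor

extractor = ASTFeatureExtractor()
waveform = np.random.rand(16_000).astype(np.float32)  # 1 second at 16 kHz
features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(features["input_values"].shape)  # (1, 1024, 128)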
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
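def _demo() -> None:
    """Non-interactive demo of the diamond for n = 3.

    Expected output (note the mirrored halves):
      *
     * *
    * * *
    * * *
     * *
      *
    """
    pretty_print(3)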
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): 
UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - 
self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
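# The lazy-init pattern above defers the submodule import until an attribute
# is first accessed. A minimal sketch of the same idea using PEP 562
# module-level __getattr__ instead of transformers' _LazyModule (this
# illustrates the mechanism, not _LazyModule's actual implementation; it must
# live in a package's __init__.py for the relative import to resolve):
import importlib

_lazy_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


def __getattr__(name):
    # Resolve e.g. "TapexTokenizer" -> ".tokenization_tapex" on first access.
    for module_name, symbols in _lazy_import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")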
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'bart' _UpperCAmelCase :str = ['past_key_values'] _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ): '''simple docstring''' UpperCamelCase : int = vocab_size UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = encoder_attention_heads UpperCamelCase : Optional[int] = decoder_ffn_dim UpperCamelCase : List[str] = decoder_layers UpperCamelCase : Optional[int] = decoder_attention_heads UpperCamelCase : int = dropout UpperCamelCase : int = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : int = init_std UpperCamelCase : List[Any] = encoder_layerdrop UpperCamelCase : List[str] = decoder_layerdrop UpperCamelCase : Dict = classifier_dropout UpperCamelCase : Optional[int] = use_cache UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): UpperCamelCase : int = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase : List[str] = {0: "batch"} UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers for i in range(A_ ): UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Tuple = super().outputs else: UpperCamelCase : Dict = super(A_ , self ).outputs if self.use_past: UpperCamelCase , UpperCamelCase : int = self.num_layers for i in range(A_ ): UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs UpperCamelCase : List[Any] = seq_length if not self.use_past else 1 UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase : List[Any] = dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads UpperCamelCase : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : List[Any] = decoder_seq_length + 3 UpperCamelCase : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase : int = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers UpperCamelCase : Any = min(A_ , A_ ) UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. 
UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads UpperCamelCase : str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase : int = torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) UpperCamelCase : Optional[Any] = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ ) UpperCamelCase : int = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) else: UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
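# The dummy past_key_values built above all follow one shape convention:
# (batch, num_heads, past_sequence_length, hidden_size // num_heads). A short
# sketch of that arithmetic with illustrative sizes (the numbers below are
# assumptions for the example, not values taken from a real checkpoint):
import torch

batch, num_heads, hidden_size = 2, 16, 1024
decoder_seq_length = 1  # seq_length collapses to 1 when use_past is set
head_dim = hidden_size // num_heads  # 64

# mirrors `decoder_past_length = decoder_seq_length + 3` in the code above
decoder_past_length = decoder_seq_length + 3
past_key = torch.zeros(batch, num_heads, decoder_past_length, head_dim)
past_value = torch.zeros(batch, num_heads, decoder_past_length, head_dim)
assert past_key.shape == torch.Size([2, 16, 4, 64])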
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
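# As a concrete check of what the recursion above enumerates: the prime
# partitions of 10 are 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7 and 5+5, and since prime
# factorizations are unique their products (32, 36, 30, 21, 25) are all
# distinct, so the set has exactly 5 elements. A self-contained counting
# sketch (`prime_partitions` is a hypothetical helper written just for this
# example):
from functools import lru_cache

_SMALL_PRIMES = (2, 3, 5, 7)


@lru_cache(maxsize=None)
def prime_partitions(n: int, max_prime: int = 7) -> int:
    """Count multisets of primes <= max_prime summing to n."""
    if n == 0:
        return 1
    total = 0
    for p in _SMALL_PRIMES:
        if p > n or p > max_prime:
            continue
        # restricting the next prime to <= p counts each multiset once
        total += prime_partitions(n - p, min(p, max_prime))
    return total


assert prime_partitions(10) == 5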
from math import sqrt def A_ ( _lowerCAmelCase ) -> bool: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' must been an int and positive" UpperCamelCase : List[Any] = True # 0 and 1 are none primes. if number <= 1: UpperCamelCase : List[Any] = False for divisor in range(2 , int(round(sqrt(_lowerCAmelCase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: UpperCamelCase : Union[str, Any] = False break # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'status' must been from type bool" return status def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N UpperCamelCase : int = list(range(2 , n + 1 ) ) UpperCamelCase : Optional[int] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_lowerCAmelCase ) ): for j in range(i + 1 , len(_lowerCAmelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): UpperCamelCase : Tuple = 0 # filters actual prime numbers. UpperCamelCase : str = [x for x in begin_list if x != 0] # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Optional[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2" UpperCamelCase : str = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_lowerCAmelCase ): ans.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and number >= 0, "'number' must been an int and >= 0" UpperCamelCase : Optional[Any] = [] # this list will be returns of the function. # potential prime number factors. 
UpperCamelCase : Tuple = 2 UpperCamelCase : str = number if number == 0 or number == 1: ans.append(_lowerCAmelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_lowerCAmelCase ): while quotient != 1: if is_prime(_lowerCAmelCase ) and (quotient % factor == 0): ans.append(_lowerCAmelCase ) quotient /= factor else: factor += 1 else: ans.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list" return ans def A_ ( _lowerCAmelCase ) -> Any: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Any = prime_factorization(_lowerCAmelCase ) UpperCamelCase : List[Any] = max(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int" return ans def A_ ( _lowerCAmelCase ) -> Union[str, Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase : List[Any] = 0 # prime factorization of 'number' UpperCamelCase : Dict = prime_factorization(_lowerCAmelCase ) UpperCamelCase : List[Any] = min(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int" return ans def A_ ( _lowerCAmelCase ) -> Optional[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _lowerCAmelCase ), "compare bust been from type bool" return number % 2 == 0 def A_ ( _lowerCAmelCase ) -> List[Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _lowerCAmelCase ), "compare bust been from type bool" return number % 2 != 0 def A_ ( _lowerCAmelCase ) -> Any: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (number > 2) and is_even(_lowerCAmelCase ) ), "'number' must been an int, even and > 2" UpperCamelCase : List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' UpperCamelCase : Dict = get_prime_numbers(_lowerCAmelCase ) UpperCamelCase : Tuple = len(_lowerCAmelCase ) # run variable for while-loops. UpperCamelCase : Optional[int] = 0 UpperCamelCase : int = None # exit variable. for break up the loops UpperCamelCase : Union[str, Any] = True while i < len_pn and loop: UpperCamelCase : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: UpperCamelCase : Any = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (len(_lowerCAmelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
UpperCamelCase : Tuple = 0 while numbera != 0: UpperCamelCase : Tuple = numbera % numbera UpperCamelCase : Any = numbera UpperCamelCase : Union[str, Any] = rest # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." UpperCamelCase : Optional[int] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' UpperCamelCase : List[Any] = prime_factorization(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = prime_factorization(_lowerCAmelCase ) elif numbera == 1 or numbera == 1: UpperCamelCase : Optional[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = max(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 UpperCamelCase : List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase ) UpperCamelCase : Tuple = prime_fac_a.count(_lowerCAmelCase ) for _ in range(max(_lowerCAmelCase , _lowerCAmelCase ) ): ans *= n else: UpperCamelCase : str = prime_fac_a.count(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): ans *= n done.append(_lowerCAmelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: UpperCamelCase : Any = prime_fac_a.count(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ): ans *= n done.append(_lowerCAmelCase ) # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def A_ ( _lowerCAmelCase ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'number' must been a positive int" UpperCamelCase : int = 0 UpperCamelCase : int = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_lowerCAmelCase ): ans += 1 # precondition assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and is_prime( _lowerCAmelCase ), "'ans' must been a prime number and from type int" return ans def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: assert ( is_prime(_lowerCAmelCase ) and is_prime(_lowerCAmelCase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" UpperCamelCase : str = p_number_a + 1 # jump to the next number UpperCamelCase : Dict = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_lowerCAmelCase ): number += 1 while number < p_number_a: ans.append(_lowerCAmelCase ) number += 1 # fetch the next prime number. while not is_prime(_lowerCAmelCase ): number += 1 # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ans[0] != p_number_a and ans[len(_lowerCAmelCase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
return ans def A_ ( _lowerCAmelCase ) -> List[str]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1" UpperCamelCase : Dict = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_lowerCAmelCase ) # precondition assert ans[0] == 1 and ans[len(_lowerCAmelCase ) - 1] == n, "Error in function getDivisiors(...)" return ans def A_ ( _lowerCAmelCase ) -> int: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and ( number > 1 ), "'number' must been an int and >= 1" UpperCamelCase : int = get_divisors(_lowerCAmelCase ) # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (divisors[0] == 1) and (divisors[len(_lowerCAmelCase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. UpperCamelCase : List[str] = gcd(abs(_lowerCAmelCase ) , abs(_lowerCAmelCase ) ) # precondition assert ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def A_ ( _lowerCAmelCase ) -> Dict: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0" UpperCamelCase : str = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def A_ ( _lowerCAmelCase ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0" UpperCamelCase : Dict = 0 UpperCamelCase : Dict = 1 UpperCamelCase : Union[str, Any] = 1 # this will be return for _ in range(n - 1 ): UpperCamelCase : Any = ans ans += fiba UpperCamelCase : str = tmp return ans
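# The gcd routine above is the iterative Euclidean algorithm: repeatedly
# replace (a, b) with (b, a mod b) until b reaches zero. A minimal standalone
# sketch with a couple of sanity checks:
def euclid_gcd(a: int, b: int) -> int:
    while b != 0:
        a, b = b, a % b
    return abs(a)


assert euclid_gcd(48, 36) == 12
assert euclid_gcd(17, 5) == 1  # coprime inputs reduce to 1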
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=False , A_=True , A_=False , A_=False , A_=19 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : str = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : int = seq_length UpperCamelCase : List[str] = is_training UpperCamelCase : List[str] = use_input_mask UpperCamelCase : Union[str, Any] = use_token_type_ids UpperCamelCase : str = use_labels UpperCamelCase : int = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Dict = num_hidden_layers UpperCamelCase : Dict = num_attention_heads UpperCamelCase : Tuple = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Optional[Any] = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : List[str] = max_position_embeddings UpperCamelCase : List[Any] = type_vocab_size UpperCamelCase : int = type_sequence_label_size UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : str = num_labels UpperCamelCase : List[str] = num_choices UpperCamelCase : int = scope def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Any = None if self.use_input_mask: UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Optional[int] = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=A_ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , ) return config def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = EsmForProteinFolding(config=A_ ).float() model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ ) UpperCamelCase : Optional[int] = model(A_ ) UpperCamelCase : Tuple = model(A_ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) 
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :int = False _UpperCAmelCase :Tuple = (EsmForProteinFolding,) if is_torch_available() else () _UpperCAmelCase :Optional[int] = () _UpperCAmelCase :Optional[int] = {} if is_torch_available() else {} _UpperCAmelCase :str = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = EsmFoldModelTester(self ) UpperCamelCase : Tuple = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) @unittest.skip("Does not support attention outputs" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support passing input embeds!" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not output hidden states in the normal way." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMfold does not output hidden states in the normal way." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold only has one output format." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold does not support input chunking." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." 
) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support data parallel." ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __UpperCamelCase( self ): '''simple docstring''' pass @require_torch class A__ ( __snake_case ): @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float() model.eval() UpperCamelCase : List[str] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCamelCase : List[str] = model(A_ )["positions"] UpperCamelCase : Union[str, Any] = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , A_ , atol=1e-4 ) )
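# The integration test above compares positions with torch.allclose at
# atol=1e-4, i.e. it tolerates small numeric drift rather than demanding exact
# equality. A tiny sketch of what that tolerance buys:
import torch

expected = torch.tensor([2.5828, 0.7993, -10.9334])
assert torch.allclose(expected, expected + 5e-5, atol=1e-4)      # within tolerance
assert not torch.allclose(expected, expected + 1e-3, atol=1e-4)  # too far off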
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __lowerCamelCase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") __lowerCamelCase : List[str] = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def A_ ( _lowerCAmelCase ) -> List[str]: UpperCamelCase : Optional[Any] = None # source code of `config_class` UpperCamelCase : Tuple = inspect.getsource(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = _re_checkpoint.findall(_lowerCAmelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): UpperCamelCase : Dict = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link UpperCamelCase : Any = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: UpperCamelCase : List[Any] = ckpt_name break return checkpoint def A_ ( ) -> List[str]: UpperCamelCase : Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue UpperCamelCase : Union[str, Any] = get_checkpoint_from_config_class(_lowerCAmelCase ) UpperCamelCase : Optional[int] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Any = "\n".join(sorted(_lowerCAmelCase ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
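# The checkpoint regex above extracts the (name, link) pair from the
# markdown-style reference that config docstrings carry. A quick standalone
# sketch of it matching (the docstring text below is a made-up example):
import re

_re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

doc = "...similar to the BART [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture."
assert _re_ckpt.findall(doc) == [
    ("facebook/bart-large", "https://huggingface.co/facebook/bart-large")
]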
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = tempfile.mkdtemp() # fmt: off UpperCamelCase : Union[str, Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCamelCase : int = dict(zip(A_ , range(len(A_ ) ) ) ) UpperCamelCase : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCamelCase : int = {"unk_token": "<unk>"} UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(A_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(A_ ) ) UpperCamelCase : Optional[int] = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } UpperCamelCase : Tuple = os.path.join(self.tmpdirname , A_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(A_ , A_ ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **A_ ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **A_ ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A_ ) def __UpperCamelCase( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase : Optional[Any] = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.get_tokenizer() UpperCamelCase : List[Any] = self.get_rust_tokenizer() UpperCamelCase : Tuple = self.get_image_processor() UpperCamelCase : List[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCamelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ ) UpperCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCamelCase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , A_ ) self.assertIsInstance(processor_fast.tokenizer , A_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , A_ ) self.assertIsInstance(processor_fast.image_processor , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCamelCase : List[str] = self.get_image_processor(do_normalize=A_ ) UpperCamelCase : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.get_image_processor() UpperCamelCase : Union[str, Any] = self.get_tokenizer() UpperCamelCase : Dict = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Dict = self.prepare_image_inputs() UpperCamelCase : Optional[Any] = image_processor(A_ , return_tensors="np" ) UpperCamelCase : Tuple = processor(images=A_ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.get_image_processor() UpperCamelCase : Dict = self.get_tokenizer() UpperCamelCase : str = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Optional[Any] = "lower newer" UpperCamelCase : List[str] = processor(text=A_ , return_tensors="np" ) 
UpperCamelCase : str = tokenizer(A_ , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.get_image_processor() UpperCamelCase : int = self.get_tokenizer() UpperCamelCase : Tuple = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : int = "lower newer" UpperCamelCase : List[str] = self.prepare_image_inputs() UpperCamelCase : Any = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = "google/owlvit-base-patch32" UpperCamelCase : int = OwlViTProcessor.from_pretrained(A_ ) UpperCamelCase : Union[str, Any] = ["cat", "nasa badge"] UpperCamelCase : Any = processor(text=A_ ) UpperCamelCase : Union[str, Any] = 16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "google/owlvit-base-patch32" UpperCamelCase : List[str] = OwlViTProcessor.from_pretrained(A_ ) UpperCamelCase : Dict = [["cat", "nasa badge"], ["person"]] UpperCamelCase : Tuple = processor(text=A_ ) UpperCamelCase : str = 16 UpperCamelCase : Optional[Any] = len(A_ ) UpperCamelCase : Tuple = max([len(A_ ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = "google/owlvit-base-patch32" UpperCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(A_ ) UpperCamelCase : List[Any] = ["cat", "nasa badge"] UpperCamelCase : Any = processor(text=A_ ) UpperCamelCase : Optional[int] = 16 UpperCamelCase : Tuple = inputs["input_ids"] UpperCamelCase : int = [ [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.get_image_processor() UpperCamelCase : List[str] = self.get_tokenizer() UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : Tuple = self.prepare_image_inputs() UpperCamelCase : Optional[Any] = self.prepare_image_inputs() UpperCamelCase : Tuple = processor(images=A_ , query_images=A_ ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.get_image_processor() UpperCamelCase : Dict = self.get_tokenizer() UpperCamelCase : int = OwlViTProcessor(tokenizer=A_ , image_processor=A_ ) UpperCamelCase : 
Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase : Dict = processor.batch_decode(A_ ) UpperCamelCase : Optional[int] = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ , A_ )
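# The nested-queries test above depends on the processor flattening
# [["cat", "nasa badge"], ["person"]] into one padded batch of shape
# (batch_size * num_max_text_queries, seq_length). The shape arithmetic on
# its own, independent of the library:
input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)                            # 2
num_max_text_queries = max(len(t) for t in input_texts)  # 2
seq_length = 16                                          # fixed by the model's tokenizer here
assert (batch_size * num_max_text_queries, seq_length) == (4, 16)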
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    rows, cols = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(rows):
        for j in range(cols):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", neg)
    waitKey(0)
    destroyAllWindows()
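# The per-pixel loop above computes 255 - value for every channel. With NumPy
# the same transform is a single vectorized expression; a sketch assuming an
# 8-bit image (`convert_to_negative_fast` is a hypothetical name for this
# example):
import numpy as np


def convert_to_negative_fast(image: np.ndarray) -> np.ndarray:
    # 255 - image inverts every channel at once for uint8 images
    return 255 - image


demo = np.array([[[10, 20, 30]]], dtype=np.uint8)
assert (convert_to_negative_fast(demo) == np.array([[[245, 235, 225]]])).all()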
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
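# The recursive converter bottoms out at 0 and 1 and prepends each remainder
# bit on the way back up the call stack. The equivalent iterative loop,
# checked against Python's built-in bin():
def to_binary_iterative(n: int) -> str:
    if n == 0:
        return "0b0"
    sign = "-" if n < 0 else ""
    n = abs(n)
    digits = []
    while n:
        n, remainder = divmod(n, 2)
        digits.append(str(remainder))
    return sign + "0b" + "".join(reversed(digits))


assert to_binary_iterative(40) == bin(40)    # '0b101000'
assert to_binary_iterative(-10) == bin(-10)  # '-0b1010'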
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
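# The key relation above hinges on modular exponentiation and modular
# inverses. A toy sketch of both with Python's three-argument pow (the prime
# below is a small Mersenne prime chosen only for illustration; real keys use
# 2048-bit primes as generated above):
p = 2**13 - 1  # 8191, prime
g = 2
x = 12_345     # stand-in private exponent

h = pow(g, x, p)       # public value g^x mod p
h_inv = pow(h, -1, p)  # modular inverse (Python 3.8+)
assert (h * h_inv) % p == 1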
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Union[str, Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : str = type_vocab_size UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : Dict = initializer_range UpperCamelCase : int = num_labels UpperCamelCase : Optional[int] = scope UpperCamelCase : int = range_bbox def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase : Union[str, Any] = bbox[i, j, 3] UpperCamelCase : int = bbox[i, j, 1] UpperCamelCase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase : List[str] = bbox[i, j, 2] UpperCamelCase : Optional[int] = bbox[i, j, 0] UpperCamelCase : Optional[Any] = t UpperCamelCase : Dict = None if self.use_input_mask: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase : str = None if self.use_token_type_ids: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ ) UpperCamelCase : Any = model(A_ , bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = self.num_labels UpperCamelCase : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs UpperCamelCase : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' return True def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = LiltModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : Union[str, Any] = type 
self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ ) UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ ) UpperCamelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ ) UpperCamelCase : List[str] = torch.Size([1, 2, 768] ) UpperCamelCase : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , ) self.assertTrue(outputs.last_hidden_state.shape , A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
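# The bbox fix-up loop in prepare_config_and_inputs above guarantees every box
# satisfies x0 <= x2 and y0 <= y2 before it reaches the model. The same
# normalization as a vectorized sketch:
import torch

bbox = torch.tensor([[[5, 9, 2, 3]]])  # (batch, seq, 4) with inverted corners
x_lo = torch.minimum(bbox[..., 0], bbox[..., 2])
x_hi = torch.maximum(bbox[..., 0], bbox[..., 2])
y_lo = torch.minimum(bbox[..., 1], bbox[..., 3])
y_hi = torch.maximum(bbox[..., 1], bbox[..., 3])
legal = torch.stack([x_lo, y_lo, x_hi, y_hi], dim=-1)
assert legal.tolist() == [[[2, 3, 5, 9]]]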
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : str = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class A__ ( __snake_case ): _UpperCAmelCase :List[Any] = 'luke' def __init__( self , A_=5_0267 , A_=50_0000 , A_=768 , A_=256 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=True , A_=None , A_=1 , A_=0 , A_=2 , **A_ , ): '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase : Any = vocab_size UpperCamelCase : int = entity_vocab_size UpperCamelCase : Any = hidden_size UpperCamelCase : Dict = entity_emb_size UpperCamelCase : List[str] = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : int = hidden_act UpperCamelCase : Any = intermediate_size UpperCamelCase : List[str] = hidden_dropout_prob UpperCamelCase : Optional[int] = attention_probs_dropout_prob UpperCamelCase : Optional[int] = max_position_embeddings UpperCamelCase : List[Any] = type_vocab_size UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : Dict = layer_norm_eps UpperCamelCase : str = use_entity_aware_attention UpperCamelCase : Optional[int] = classifier_dropout
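# LukeConfig layers an entity vocabulary and a separate entity embedding size
# on top of the usual encoder hyperparameters. A minimal sketch of building a
# scaled-down config (the sizes are arbitrary; assumes transformers is
# installed):
from transformers import LukeConfig

tiny = LukeConfig(
    vocab_size=1_000,
    entity_vocab_size=100,
    hidden_size=64,
    entity_emb_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
)
assert tiny.entity_vocab_size == 100
assert tiny.use_entity_aware_attention  # defaults to True, as in the signature above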
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
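# Hedged end-to-end sketch distilled from the tests above (not part of the original
# test module): build a FaissIndex, query it, and round-trip it through a file.
# Assumes the `datasets` and `faiss` packages are installed.
import tempfile

import faiss
import numpy as np

from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five orthonormal vectors

scores, ids = index.search(np.eye(5, dtype=np.float32)[2])
assert ids[0] == 2  # inner product peaks at the matching basis vector

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    index.save(tmp.name)
    restored = FaissIndex.load(tmp.name)
scores, ids = restored.search(np.eye(5, dtype=np.float32)[2])
assert ids[0] == 2  # the loaded index answers identically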
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
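# Hedged usage sketch (not part of the original module). It assumes the class above
# is exposed publicly as `transformers.FNetConfig`; `from_dict`/`to_dict` come from
# the PretrainedConfig base class.
from transformers import FNetConfig

config = FNetConfig(num_hidden_layers=4, use_tpu_fourier_optimizations=True)
clone = FNetConfig.from_dict(config.to_dict())  # round-trips through a plain dict
assert clone.num_hidden_layers == 4
assert clone.tpu_short_seq_length == 512  # default is kept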
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][tile_length - 2] + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
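# Hedged cross-check (not part of the original file), assuming the DP above solves
# Project Euler 116: a row of `length` black unit squares where some squares are
# replaced by coloured tiles of a single size (red=2, green=3, blue=4), using at
# least one tile. Each colour is counted independently.
from functools import lru_cache


@lru_cache(maxsize=None)
def tilings(row_length: int, tile_length: int) -> int:
    # Tilings of a row with unit black squares and tiles of one fixed size,
    # counting the all-black row as one tiling.
    if row_length < tile_length:
        return 1
    return tilings(row_length - 1, tile_length) + tilings(row_length - tile_length, tile_length)


# Subtract one per colour to drop the all-black arrangement.
assert solution(50) == sum(tilings(50, m) - 1 for m in (2, 3, 4))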
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A_ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(A_ , "neck_hidden_sizes" ) ) self.parent.assertTrue(hasattr(A_ , "num_attention_heads" ) ) class A__ : def __init__( self , A_ , A_=13 , A_=32 , A_=2 , A_=3 , A_=640 , A_=4 , A_="silu" , A_=3 , A_=32 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.02 , A_=True , A_=True , A_=10 , A_=None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Optional[int] = image_size UpperCamelCase : List[Any] = patch_size UpperCamelCase : Any = num_channels UpperCamelCase : Optional[int] = last_hidden_size UpperCamelCase : str = num_attention_heads UpperCamelCase : Optional[int] = hidden_act UpperCamelCase : Dict = conv_kernel_size UpperCamelCase : Any = output_stride UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Optional[Any] = attention_probs_dropout_prob UpperCamelCase : Dict = classifier_dropout_prob UpperCamelCase : Optional[int] = use_labels UpperCamelCase : str = is_training UpperCamelCase : Tuple = num_labels UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : int = scope def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : List[Any] = None UpperCamelCase : Optional[int] = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def __UpperCamelCase( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = MobileViTModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Tuple = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def 
__UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Tuple = self.num_labels UpperCamelCase : int = MobileViTForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = self.num_labels UpperCamelCase : Any = MobileViTForSemanticSegmentation(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = model(A_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) UpperCamelCase : str = model(A_ , labels=A_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = config_and_inputs UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) _UpperCAmelCase :Dict = ( { 'feature-extraction': MobileViTModel, 'image-classification': MobileViTForImageClassification, 'image-segmentation': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCAmelCase :Tuple = False _UpperCAmelCase :int = False _UpperCAmelCase :Tuple = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = MobileViTModelTester(self ) UpperCamelCase : int = MobileViTConfigTester(self , config_class=A_ , has_text_modality=A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip(reason="MobileViT does not support input and output embeddings" ) def __UpperCamelCase( self ): '''simple docstring''' pass @unittest.skip(reason="MobileViT does not output attentions" ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Optional[Any] = model_class(A_ ) UpperCamelCase : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Optional[int] = [*signature.parameters.keys()] UpperCamelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' def check_hidden_states_output(A_ , A_ , A_ ): UpperCamelCase : Tuple = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): UpperCamelCase : Dict = model(**self._prepare_for_class(A_ , A_ ) ) UpperCamelCase : str = outputs.hidden_states UpperCamelCase : Optional[int] = 5 self.assertEqual(len(A_ ) , A_ ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCamelCase : Tuple = 2 for i in range(len(A_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : int = True check_hidden_states_output(A_ , A_ , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Tuple = MobileViTModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def A_ ( ) -> Union[str, Any]: UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def __UpperCamelCase( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(A_ ) UpperCamelCase : List[Any] = self.default_image_processor UpperCamelCase : Any = prepare_img() UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="pt" ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase : List[str] = model(**A_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase : Any = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" ) UpperCamelCase : str = model.to(A_ ) UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" ) UpperCamelCase : Tuple = prepare_img() UpperCamelCase : Dict = image_processor(images=A_ , return_tensors="pt" 
).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Dict = model(**A_ ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : int = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , A_ ) UpperCamelCase : Any = torch.tensor( [ [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]], [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]], [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]], ] , device=A_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" ) UpperCamelCase : Any = model.to(A_ ) UpperCamelCase : List[Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" ) UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Dict = image_processor(images=A_ , return_tensors="pt" ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Tuple = model(**A_ ) UpperCamelCase : Tuple = outputs.logits.detach().cpu() UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(50, 60)] ) UpperCamelCase : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , A_ ) UpperCamelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=A_ ) UpperCamelCase : Optional[int] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , A_ )
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
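# Hedged usage sketch (not part of the original file): a keyword-cipher round trip.
# `create_cipher_map` builds the substitution table used by both directions; the
# cipher upper-cases its input and passes non-letters through unchanged.
cipher_map = create_cipher_map("Goodbye")
secret = encipher("Hello, World!", cipher_map)
assert decipher(secret, cipher_map) == "HELLO, WORLD!"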
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class A__ ( __snake_case ): _UpperCAmelCase :List[Any] = (DDIMParallelScheduler,) _UpperCAmelCase :Any = (('eta', 0.0), ('num_inference_steps', 5_0)) def __UpperCamelCase( self , **A_ ): '''simple docstring''' UpperCamelCase : List[str] = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**A_ ) return config def __UpperCamelCase( self , **A_ ): '''simple docstring''' UpperCamelCase : Tuple = self.scheduler_classes[0] UpperCamelCase : List[Any] = self.get_scheduler_config(**A_ ) UpperCamelCase : Dict = scheduler_class(**A_ ) UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0 UpperCamelCase : List[Any] = self.dummy_model() UpperCamelCase : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(A_ ) for t in scheduler.timesteps: UpperCamelCase : Any = model(A_ , A_ ) UpperCamelCase : Any = scheduler.step(A_ , A_ , A_ , A_ ).prev_sample return sample def __UpperCamelCase( self ): '''simple docstring''' for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=A_ ) UpperCamelCase : Optional[Any] = self.scheduler_classes[0] UpperCamelCase : Dict = self.get_scheduler_config(steps_offset=1 ) UpperCamelCase : List[str] = scheduler_class(**A_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __UpperCamelCase( self ): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase( self ): '''simple docstring''' for t in [1, 10, 49]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=A_ , num_inference_steps=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=A_ , eta=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.scheduler_classes[0] UpperCamelCase : Union[str, Any] = self.get_scheduler_config() UpperCamelCase : List[Any] = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert 
torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.scheduler_classes[0] UpperCamelCase : Any = self.get_scheduler_config() UpperCamelCase : List[Any] = scheduler_class(**A_ ) UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0 scheduler.set_timesteps(A_ ) UpperCamelCase : Tuple = self.dummy_model() UpperCamelCase : List[str] = self.dummy_sample_deter UpperCamelCase : Optional[int] = self.dummy_sample_deter + 0.1 UpperCamelCase : Optional[int] = self.dummy_sample_deter - 0.1 UpperCamelCase : Optional[Any] = samplea.shape[0] UpperCamelCase : Dict = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase : int = torch.arange(A_ )[0:3, None].repeat(1 , A_ ) UpperCamelCase : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase : Optional[int] = scheduler.batch_step_no_noise(A_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , A_ ) UpperCamelCase : Optional[Any] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Any = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2 assert abs(result_mean.item() - 0.49_82 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.full_loop() UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.full_loop(prediction_type="v_prediction" ) UpperCamelCase : List[str] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Optional[int] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 52.53_02 ) < 1e-2 assert abs(result_mean.item() - 0.06_84 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 ) UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2 assert abs(result_mean.item() - 0.19_51 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 ) UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2 assert abs(result_mean.item() - 0.19_41 ) < 1e-3
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the
        labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a
        multiclass average ignoring a majority negative class. Labels not present in the data will result in 0
        components in a macro average. For multilabel targets, labels are column indices. By default, all labels in
        `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`.
        Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for
        each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to
        `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes
          found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false
          positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label
          imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true
          instances for each label). This alters `'macro'` to account for label imbalance. This option can result in
          an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel
          classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`.
        Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and
            Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
            Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
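# Hedged usage note (not part of the original file): with the _LazyModule indirection
# above, importing the package only records _import_structure; the heavy torch/TF/flax
# submodules are imported on first attribute access. A standalone illustration:
import importlib

electra = importlib.import_module("transformers.models.electra")
model_cls = electra.ElectraModel  # the torch-backed submodule loads here, not earlier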
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], 
"vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def A_ ( _lowerCAmelCase ) -> int: # picklable for multiprocessing return x.sum() def A_ ( _lowerCAmelCase ) -> str: # picklable for multiprocessing return i + 1 @dataclass class A__ : _UpperCAmelCase :int _UpperCAmelCase :str class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = {} UpperCamelCase : Any = [] UpperCamelCase : Dict = 1 UpperCamelCase : Optional[int] = [1, 2] UpperCamelCase : Union[str, Any] = {"a": 1, "b": 2} UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]} UpperCamelCase : Optional[Any] = {"a": {"1": 1}, "b": 2} UpperCamelCase : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4} UpperCamelCase : Dict = {} UpperCamelCase : List[str] = [] UpperCamelCase : Union[str, Any] = 2 UpperCamelCase : str = [2, 3] UpperCamelCase : str = {"a": 2, "b": 3} UpperCamelCase : Optional[Any] = {"a": [2, 3], "b": [4, 5]} UpperCamelCase : List[str] = {"a": {"1": 2}, "b": 3} UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) UpperCamelCase : Any = 2 self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) UpperCamelCase : Optional[int] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} UpperCamelCase : Optional[Any] = {"a": 2, "b": 0, "c": 2} UpperCamelCase : Optional[Any] = { "a": np.eye(2 ).astype(A_ ), "b": np.zeros(3 ).astype(A_ ), "c": np.ones(2 ).astype(A_ ), } self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(A_ ): # can't pickle a local lambda map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = {"a": 1, "b": 2} UpperCamelCase : Union[str, Any] = {"a": 3, "b": 4} UpperCamelCase : Optional[int] = {"a": 5, "b": 6} UpperCamelCase : Tuple = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ ) def __UpperCamelCase( self ): '''simple docstring''' class A__ : _UpperCAmelCase :int = 'bar' UpperCamelCase : Dict = Foo() 
self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(A_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: UpperCamelCase : int = {F"""{i}""": i for i in range(_lowerCAmelCase )} UpperCamelCase : Optional[int] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A__ ( __snake_case ): @require_tf def __UpperCamelCase( self ): '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers UpperCamelCase : Dict = layers.Dense(2 ) def gen_random_output(): UpperCamelCase : Optional[int] = tf.random.uniform((1, 3) ) return model(A_ ).numpy() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : Optional[Any] = gen_random_output() with temp_seed(42 , set_tensorflow=A_ ): UpperCamelCase : List[str] = gen_random_output() UpperCamelCase : Optional[int] = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch def gen_random_output(): UpperCamelCase : Any = torch.nn.Linear(3 , 2 ) UpperCamelCase : Union[str, Any] = torch.rand(1 , 3 ) return model(A_ ).detach().numpy() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : List[str] = gen_random_output() with temp_seed(42 , set_pytorch=A_ ): UpperCamelCase : Dict = gen_random_output() UpperCamelCase : str = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __UpperCamelCase( self ): '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): UpperCamelCase : Tuple = gen_random_output() with temp_seed(42 ): UpperCamelCase : int = gen_random_output() UpperCamelCase : Any = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" , [{}] ) def A_ ( _lowerCAmelCase ) -> Dict: UpperCamelCase : Union[str, Any] = NestedDataStructure(_lowerCAmelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" , [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] 
, ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: UpperCamelCase : Union[str, Any] = NestedDataStructure(_lowerCAmelCase ).flatten() assert output == expected_output def A_ ( ) -> List[Any]: UpperCamelCase : Dict = A(x=1 , y="foobar" ) UpperCamelCase : Optional[Any] = {"x": 1, "y": "foobar"} assert asdict(_lowerCAmelCase ) == expected_output UpperCamelCase : Tuple = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]} UpperCamelCase : str = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(_lowerCAmelCase ) == expected_output with pytest.raises(_lowerCAmelCase ): asdict([1, A(x=10 , y="foo" )] ) def A_ ( _lowerCAmelCase ) -> Any: return text.split() def A_ ( _lowerCAmelCase ) -> Optional[int]: yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def A_ ( ) -> int: with Pool(2 ) as pool: UpperCamelCase : Optional[Any] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCamelCase : Tuple = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(_lowerCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: UpperCamelCase : List[str] = [] for yield_time, content in iflatmap_unordered( _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_lowerCAmelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(_lowerCAmelCase ) == 4
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # Initialize with a list of the number of items in each set,
        # with rank 1 for every set.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        # Merge two sets using the union-by-rank heuristic;
        # return True if they were actually merged.
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        # Find the representative of a set, compressing the path as we go.
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
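# Hedged usage sketch (not part of the original file): four singleton sets, merged
# by rank; `max_set` tracks the size of the largest set seen so far.
ds = DisjointSet([1, 1, 1, 1])
assert ds.merge(1, 2) is True   # {1, 2}
assert ds.merge(0, 2) is True   # {0, 1, 2}
assert ds.merge(0, 1) is False  # already in the same set
assert ds.max_set == 3
assert ds.get_parent(0) == ds.get_parent(1)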
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :int = KandinskyVaaPriorPipeline _UpperCAmelCase :int = ['prompt'] _UpperCAmelCase :str = ['prompt', 'negative_prompt'] _UpperCAmelCase :Tuple = [ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] _UpperCAmelCase :Tuple = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(A_ ) @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[Any] = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } UpperCamelCase : List[str] = PriorTransformer(**A_ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 UpperCamelCase : Union[str, Any] = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : int = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) UpperCamelCase : Dict = CLIPVisionModelWithProjection(A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=A_ , do_normalize=A_ , do_resize=A_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.dummy_prior UpperCamelCase : Any = self.dummy_image_encoder UpperCamelCase : List[str] = self.dummy_text_encoder UpperCamelCase : Dict = self.dummy_tokenizer UpperCamelCase : Optional[Any] = self.dummy_image_processor UpperCamelCase : List[str] = UnCLIPScheduler( 
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=A_ , clip_sample_range=10.0 , ) UpperCamelCase : Optional[int] = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' if str(A_ ).startswith("mps" ): UpperCamelCase : List[Any] = torch.manual_seed(A_ ) else: UpperCamelCase : int = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = "cpu" UpperCamelCase : Dict = self.get_dummy_components() UpperCamelCase : Union[str, Any] = self.pipeline_class(**A_ ) UpperCamelCase : str = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : str = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : Any = output.image_embeds UpperCamelCase : int = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : List[str] = image[0, -10:] UpperCamelCase : Dict = image_from_tuple[0, -10:] assert image.shape == (1, 32) UpperCamelCase : Any = np.array( [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = torch_device == "cpu" UpperCamelCase : Dict = True UpperCamelCase : str = False self._test_inference_batch_single_identical( test_max_difference=A_ , relax_max_difference=A_ , test_mean_pixel_difference=A_ , ) @skip_mps def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = torch_device == "cpu" UpperCamelCase : Tuple = False self._test_attention_slicing_forward_pass( test_max_difference=A_ , test_mean_pixel_difference=A_ , )
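# The device check in get_dummy_inputs above reflects a PyTorch limitation:
# torch.Generator(device="mps") is not generally supported, so MPS runs fall
# back to seeding the global CPU RNG. The same pattern in isolation (a small
# illustrative helper, not part of the test suite):
import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the seeded global CPU generator
    return torch.Generator(device=device).manual_seed(seed)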
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
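# The module above follows the standard transformers lazy-import layout: names
# are declared in _import_structure and only imported on first attribute
# access. A stripped-down sketch of that idea -- `MiniLazyModule` is a toy
# stand-in, not transformers' actual `_LazyModule`:
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value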
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __lowerCamelCase : Optional[Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A__ ( __snake_case ): _UpperCAmelCase :int = 'gpt_neo' _UpperCAmelCase :int = ['past_key_values'] _UpperCAmelCase :int = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , A_=5_0257 , A_=2048 , A_=2048 , A_=24 , A_=[[["global", "local"], 12]] , A_=16 , A_=None , A_=256 , A_="gelu_new" , A_=0.0 , A_=0.0 , A_=0.0 , A_=0.1 , A_=1e-5 , A_=0.02 , A_=True , A_=5_0256 , A_=5_0256 , **A_ , ): '''simple docstring''' UpperCamelCase : List[Any] = vocab_size UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : List[Any] = hidden_size UpperCamelCase : str = num_layers UpperCamelCase : Any = num_heads UpperCamelCase : Optional[int] = intermediate_size UpperCamelCase : int = window_size UpperCamelCase : Any = activation_function UpperCamelCase : List[Any] = resid_dropout UpperCamelCase : List[str] = embed_dropout UpperCamelCase : str = attention_dropout UpperCamelCase : Optional[Any] = classifier_dropout UpperCamelCase : List[Any] = layer_norm_epsilon UpperCamelCase : Any = initializer_range UpperCamelCase : Union[str, Any] = use_cache UpperCamelCase : Optional[Any] = bos_token_id UpperCamelCase : List[Any] = eos_token_id UpperCamelCase : Optional[int] = attention_types UpperCamelCase : Any = self.expand_attention_types_params(A_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """ F"""`config.num_layers = {self.num_layers}`. """ "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ ) @staticmethod def __UpperCamelCase( A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: import torch UpperCamelCase : List[str] = input.size() UpperCamelCase : Any = len(_lowerCAmelCase ) UpperCamelCase : Dict = shape[dimension] UpperCamelCase : Optional[Any] = torch.arange(0 , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : str = torch.div(sizedim - size , _lowerCAmelCase , rounding_mode="floor" ) + 1 UpperCamelCase : Union[str, Any] = torch.arange(_lowerCAmelCase ) + low_indices[:min_length][:, None] UpperCamelCase : Dict = [slice(_lowerCAmelCase )] * rank UpperCamelCase : Dict = indices UpperCamelCase : str = input[s] UpperCamelCase : Any = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_lowerCAmelCase ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: import torch UpperCamelCase : Optional[int] = torch.arange(1 , _lowerCAmelCase ) UpperCamelCase : str = torch.remainder(_lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Optional[int] = remainders == 0 UpperCamelCase : Tuple = candidates[divisor_indices] UpperCamelCase : int = torch.max(_lowerCAmelCase ) return largest_divisor, torch.div(_lowerCAmelCase , _lowerCAmelCase , rounding_mode="floor" ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) UpperCamelCase : Optional[int] = {0: "batch", 1: "past_sequence + sequence"} else: UpperCamelCase : str = {0: "batch", 1: "sequence"} return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' return self._config.num_heads def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[str] = super(A_ , self ).generate_dummy_inputs( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) # We need to order the input in the way they appears in the forward() UpperCamelCase : int = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCamelCase , UpperCamelCase : Optional[int] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCamelCase : Any = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers ) ] UpperCamelCase : Any = common_inputs["attention_mask"] if self.use_past: UpperCamelCase : Any = ordered_inputs["attention_mask"].dtype UpperCamelCase : List[Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) return ordered_inputs @property def __UpperCamelCase( self ): '''simple docstring''' return 13
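# How the compact attention_types spec expands: with the default
# [[["global", "local"], 12]], expand_attention_types_params yields one entry
# per layer, alternating global and local attention:
spec = [[["global", "local"], 12]]
attentions = []
for item in spec:
    for _ in range(item[1]):
        attentions.extend(item[0])
assert attentions[:4] == ["global", "local", "global", "local"]
assert len(attentions) == 24  # matches the default num_layers=24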
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
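# Quick usage sketch: instantiating the config with defaults shows the derived
# fields computed in __init__ above.
config = ConvNextV2Config()
assert config.hidden_sizes == [96, 192, 384, 768]
assert config.depths == [3, 3, 9, 3]
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]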
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : Optional[Any] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING __lowerCamelCase : Union[str, Any] = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are 
sure SPECIAL_CASES_TO_ALLOW.update( { """CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: UpperCamelCase : List[str] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): UpperCamelCase : Union[str, Any] = True # Deal with multi-line cases elif ( re.search( rF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , _lowerCAmelCase , ) is not None ): UpperCamelCase : List[str] = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: UpperCamelCase : str = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files UpperCamelCase : Tuple = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", ] UpperCamelCase : str = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed UpperCamelCase : List[Any] = True if not attribute_used: UpperCamelCase : List[str] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: UpperCamelCase : Optional[Any] = True elif attribute in ["tie_word_embeddings"] and default_value is False: UpperCamelCase : Any = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: UpperCamelCase : int = True elif attribute.endswith("_token_id" ): UpperCamelCase : Optional[int] = True # configuration class specific cases if not case_allowed: UpperCamelCase : Dict = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) UpperCamelCase : List[str] = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : Optional[Any] = dict(inspect.signature(config_class.__init__ ).parameters ) UpperCamelCase : List[Any] = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]] UpperCamelCase : Union[str, 
Any] = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass UpperCamelCase : Optional[int] = {} if len(config_class.attribute_map ) > 0: UpperCamelCase : Any = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files UpperCamelCase : List[Any] = inspect.getsourcefile(_lowerCAmelCase ) UpperCamelCase : Tuple = os.path.dirname(_lowerCAmelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. UpperCamelCase : List[str] = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for fn in os.listdir(_lowerCAmelCase ) if fn.startswith("modeling_" )] # Get the source code strings UpperCamelCase : Dict = [] for path in modeling_paths: if os.path.isfile(_lowerCAmelCase ): with open(_lowerCAmelCase ) as fp: modeling_sources.append(fp.read() ) UpperCamelCase : List[str] = [] for config_param, default_value in zip(_lowerCAmelCase , _lowerCAmelCase ): # `attributes` here is all the variant names for `config_param` UpperCamelCase : Union[str, Any] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): unused_attributes.append(attributes[0] ) return sorted(_lowerCAmelCase ) def A_ ( ) -> str: UpperCamelCase : List[str] = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) UpperCamelCase : Tuple = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _lowerCAmelCase : inspect.isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ) and inspect.getmodule(_lowerCAmelCase ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: UpperCamelCase : Any = check_config_attributes_being_used(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : int = unused_attributes if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(_lowerCAmelCase ) if __name__ == "__main__": check_config_attributes()
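# The multi-line getattr detection above relies on a whitespace-tolerant
# regex. A standalone check of the same pattern; "window_size" is just an
# illustrative attribute name:
import re

source = 'value = getattr(\n    self.config,\n    "window_size",\n    None,\n)'
pattern = r"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"window_size\""
assert re.search(pattern, source) is not None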
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
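# A hedged sketch of how a timeout-style offline mode can be simulated by
# patching requests at the adapter level. This mirrors the idea behind
# offline(OfflineSimulationMode.CONNECTION_TIMES_OUT); it is not datasets'
# exact implementation:
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def simulate_connection_timeout():
    def raise_timeout(*args, **kwargs):
        raise requests.exceptions.ConnectTimeout("simulated offline mode")

    # every HTTP request now behaves as if the connection timed out
    with patch("requests.adapters.HTTPAdapter.send", raise_timeout):
        yield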
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros so the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
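# Worked examples for the converter above: 0b1111 is 15, i.e. 0o17, and
# 0b101010101 is 341, i.e. 0o525.
assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == "525"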
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=None , A_=2 , ): '''simple docstring''' UpperCamelCase : Optional[Any] = parent UpperCamelCase : Union[str, Any] = batch_size UpperCamelCase : Optional[int] = image_size UpperCamelCase : List[Any] = patch_size UpperCamelCase : List[str] = num_channels UpperCamelCase : Tuple = is_training UpperCamelCase : Optional[int] = use_labels UpperCamelCase : Dict = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Dict = num_attention_heads UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : Optional[int] = hidden_act UpperCamelCase : Dict = hidden_dropout_prob UpperCamelCase : Dict = attention_probs_dropout_prob UpperCamelCase : str = type_sequence_label_size UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : Dict = scope UpperCamelCase : str = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase : Any = (image_size // patch_size) ** 2 UpperCamelCase : List[Any] = num_patches + 1 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def __UpperCamelCase( self ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = ViTModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Any = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Any = ViTForMaskedImageModeling(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = model(A_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # 
test greyscale images UpperCamelCase : List[Any] = 1 UpperCamelCase : Dict = ViTForMaskedImageModeling(A_ ) model.to(A_ ) model.eval() UpperCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : Optional[Any] = model(A_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.type_sequence_label_size UpperCamelCase : Union[str, Any] = ViTForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Any = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase : List[str] = 1 UpperCamelCase : Optional[int] = ViTForImageClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : Optional[int] = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : str = config_and_inputs UpperCamelCase : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCAmelCase :List[str] = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase :str = True _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Tuple = False def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = ViTModelTester(self ) UpperCamelCase : str = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Optional[int] = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[str] = model_class(A_ ) UpperCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Any = [*signature.parameters.keys()] UpperCamelCase : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Tuple = ViTModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def A_ ( ) -> Any: UpperCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def __UpperCamelCase( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(A_ ) UpperCamelCase : str = self.default_image_processor UpperCamelCase : Optional[Any] = prepare_img() UpperCamelCase : Tuple = image_processor(images=A_ , return_tensors="pt" ).to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(**A_ ) # verify the logits UpperCamelCase : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , A_ ) UpperCamelCase : Any = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = ViTModel.from_pretrained("facebook/dino-vits8" ).to(A_ ) UpperCamelCase : str = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 ) UpperCamelCase : str = prepare_img() UpperCamelCase : Optional[Any] = image_processor(images=A_ , return_tensors="pt" ) UpperCamelCase : str = inputs.pixel_values.to(A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[Any] = model(A_ , interpolate_pos_encoding=A_ ) # verify the logits UpperCamelCase : Union[str, Any] = torch.Size((1, 3601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , A_ ) UpperCamelCase : str = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , A_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) UpperCamelCase : Tuple = self.default_image_processor UpperCamelCase : Optional[int] = prepare_img() UpperCamelCase : Tuple = image_processor(images=A_ , return_tensors="pt" ) UpperCamelCase : Tuple = inputs.pixel_values.to(A_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): UpperCamelCase : List[str] = model(A_ )
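# Where the (1, 3601, 384) hidden-state shape in the interpolation test comes
# from: dino-vits8 uses 8x8 patches, so a 480x480 input yields a 60x60 patch
# grid plus the [CLS] token.
image_size, patch_size = 480, 8
num_patches = (image_size // patch_size) ** 2  # 60 * 60 = 3600
assert num_patches + 1 == 3601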
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, 
Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCamelCase : Tuple = 1 UpperCamelCase : Tuple = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = ssent.split(" " ) UpperCamelCase : Dict = csent.split(" " ) UpperCamelCase : str = [] UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : str = [] UpperCamelCase : str = [] UpperCamelCase : Dict = [] UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [] UpperCamelCase : Tuple = [] for rsent in rsents: UpperCamelCase : List[Any] = rsent.split(" " ) UpperCamelCase : List[str] = [] UpperCamelCase : int = [] UpperCamelCase : Tuple = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCamelCase : str = 
sum([delascore, delascore, delascore, delascore] ) / 4 UpperCamelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCamelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: UpperCamelCase : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCamelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: UpperCamelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": UpperCamelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase ) elif tokenizer == "penn": UpperCamelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase ) else: UpperCamelCase : Union[str, Any] = sentence if not return_str: UpperCamelCase : Tuple = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError("Sources length must match predictions and references lengths." 
) UpperCamelCase : Optional[Any] = 0 for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] ) UpperCamelCase : Optional[int] = sari_score / len(_lowerCAmelCase ) return 100 * sari_score def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]: UpperCamelCase : Optional[Any] = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] UpperCamelCase : Tuple = sacrebleu.corpus_bleu( _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = {} result.update({"sari": compute_sari(sources=A_ , predictions=A_ , references=A_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=A_ , references=A_ )} ) result.update({"exact": compute_em(predictions=A_ , references=A_ )} ) return result
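# compute_em above counts a prediction as correct when it matches any
# reference after normalize_answer (lowercasing, article and punctuation
# removal), e.g.:
predictions = ["The cat sat.", "dogs bark"]
references = [["cat sat"], ["cats bark"]]
scores = [any(compute_exact(pred, ref) for ref in refs) for pred, refs in zip(predictions, references)]
assert scores == [1, 0]  # an exact score of 50.0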