import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=None,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
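# --- Illustrative usage (added; not part of the original script) ---
# A minimal doctest-style sketch of the preprocessing + metric flow above, assuming
# only that `transformers`, `evaluate`, and `numpy` are installed; the model name and
# inputs are placeholder values.
# >>> from transformers import AutoTokenizer
# >>> import evaluate, numpy as np
# >>> tok = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
# >>> batch = tok(["A man is eating."], ["Someone is eating."], padding="max_length", max_length=128, truncation=True)
# >>> xnli_metric = evaluate.load("xnli")
# >>> xnli_metric.compute(predictions=np.array([0, 1]), references=np.array([0, 1]))
# {'accuracy': 1.0}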
# ----------------------------------------------------------------------
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
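# --- Illustrative usage (added; not part of the original test file) ---
# A hedged sketch of the round-trip the tests above exercise; constructing the
# processors with default arguments is an assumption here.
# >>> processor = TvltProcessor(image_processor=TvltImageProcessor(), feature_extractor=TvltFeatureExtractor())
# >>> inputs = processor(audio=np.ones([12_000]), images=np.ones([3, 224, 224]))
# >>> sorted(inputs.keys())
# ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']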
# ----------------------------------------------------------------------
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
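# --- Illustrative usage (added; not part of the original script) ---
# A hedged sketch of reloading the artifacts written above. `passages_path` and
# `index_path` mirror the values computed in `main`, and `question_embedding` is a
# placeholder DPR query vector; `get_nearest_examples` is the standard `datasets`
# API for querying a loaded Faiss index.
# >>> from datasets import load_from_disk
# >>> ds = load_from_disk(passages_path)
# >>> ds.load_faiss_index("embeddings", index_path)
# >>> scores, retrieved = ds.get_nearest_examples("embeddings", question_embedding, k=5)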
# ----------------------------------------------------------------------
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a Table Transformer model.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
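# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch: instantiating the config with its defaults and reading the
# attributes exposed through `attribute_map` and the properties above.
# >>> config = TableTransformerConfig()
# >>> config.num_attention_heads == config.encoder_attention_heads == 8
# True
# >>> config.hidden_size == config.d_model == 256
# True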
# ----------------------------------------------------------------------
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
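# --- Illustrative usage (added; not part of the original module) ---
# A hedged sketch of the two helpers above on already-converted token ids: a pair
# (a, b) is rendered as [CLS] a [SEP] b [SEP], with token type id 0 for the first
# segment (including [CLS] and its [SEP]) and 1 for the second.
# >>> tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# >>> tok.build_inputs_with_special_tokens([7, 8], [9])  # [cls_id, 7, 8, sep_id, 9, sep_id]
# >>> tok.create_token_type_ids_from_sequences([7, 8], [9])
# [0, 0, 0, 0, 1, 1]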
# ----------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----------------------------------------------------------------------
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` terms of the Hamming (5-smooth) number series."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list of the first n Hamming numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
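# --- Illustrative usage (added; not part of the original script) ---
# The first ten Hamming (5-smooth) numbers, as a quick sanity check:
# >>> hamming(10)
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]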
# ----------------------------------------------------------------------
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI works with the bundled test configs.
    A pre-existing `default_config.yaml` is moved aside for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate tpu-config` builds the right `gcloud` command.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
# ----------------------------------------------------------------------
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the given number of vertices and edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with the given number of vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
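# --- Illustrative usage (added; not part of the original script) ---
# A hedged sketch; the exact edges of `random_graph` depend on the seeded RNG state,
# while `complete_graph` is deterministic.
# >>> random.seed(1)
# >>> g = random_graph(4, 0.5)  # undirected: j in g[i] iff i in g[j]
# >>> complete_graph(3)
# {0: [1, 2], 1: [0, 2], 2: [0, 1]}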
# ----------------------------------------------------------------------
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Sort ``list_data`` in place using recursive bubble sort."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
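# --- Illustrative usage (added; not part of the original script) ---
# The recursion depth is bounded by the list length, so this is safe for short lists:
# >>> bubble_sort([5, 1, 4, 2])
# [1, 2, 4, 5]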
# ----------------------------------------------------------------------
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
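# --- Illustrative note (added; not part of the original script) ---
# Sketch of the mapping built above for a fairseq dict file whose lines look like
# "<word> <count>": the four specials take ids 0-3 and corpus words follow in file
# order, e.g. a file containing "hello 42\nworld 7" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.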
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
# ----------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----------------------------------------------------------------------
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 531
| 1
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size, device):
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size=5,
        entry_length=67,
        temperature=1.0,
        eos_token_id=None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 703
|
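The `generate_beam` method above ranks candidate sequences by their average log-probability (`scores_sum / seq_lengths`), so longer candidates are not penalized merely for containing more tokens. A self-contained sketch of that scoring rule, using made-up log-probabilities rather than model outputs:

# Toy illustration of the length-normalized beam scoring used in generate_beam.
# The log-probabilities below are invented values, not outputs of any model.
import torch

summed_log_probs = torch.tensor([-3.0, -4.5, -4.8])  # total log-prob per candidate
seq_lengths = torch.tensor([3.0, 5.0, 6.0])          # tokens generated per candidate

avg_scores = summed_log_probs / seq_lengths  # analogous to scores_sum / seq_lengths
order = avg_scores.argsort(descending=True)
print(order)  # tensor([2, 1, 0]): the longest candidate wins despite the lowest raw score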
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 25
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by throwing random darts at the unit square."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262
| 0
|
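A quick usage sketch for the estimators above; the iteration count is arbitrary, and larger values shrink the Monte Carlo error at the usual 1/sqrt(n) rate.

# Usage sketch for the Monte Carlo estimators defined above.
pi_estimator(100_000)                         # dart-throwing estimate of pi
area_under_line_estimator_check(100_000)      # integral of y=x over [0, 1]
pi_estimator_using_area_under_curve(100_000)  # pi via the integral of sqrt(4 - x^2) over [0, 2]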
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 655
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test when on a single CPU or GPU that the context manager does nothing
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
# Test on distributed setup that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__lowercase , __lowercase = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__lowercase = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def snake_case_ ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
__lowercase = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = batch.values()
# Gather the distributed inputs and targs for the base model
__lowercase , __lowercase = accelerator.gather((ddp_input, ddp_target) )
__lowercase , __lowercase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
__lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
__lowercase = Accelerator()
__lowercase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 655
| 1
|
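The tests above verify `Accelerator.accumulate`, which skips gradient synchronization until a window of `gradient_accumulation_steps` batches completes. A minimal usage sketch follows; the toy linear model and random data are placeholders invented here, not objects from the tests.

# Minimal sketch of gradient accumulation with accelerate; the model and data
# are toy placeholders, not the RegressionModel used by the tests above.
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside `accumulate`, gradients are only synchronized and applied
    # once every 2 batches; intermediate steps accumulate locally.
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()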
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse the user profile dict out of an embedded <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Scrapes public profile information for a given Instagram username."""

    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    """Smoke-test the scraper against the GitHub profile."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 593
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : Dict = ConsistencyModelPipeline
a__ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a__ : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
a__ : Any = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps""" ):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """batch_size""": 1,
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """generator""": generator,
            """output_type""": """np""",
        }
        return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= ConsistencyModelPipeline(**__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components(class_cond=__UpperCAmelCase )
UpperCAmelCase_= ConsistencyModelPipeline(**__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= 0
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= ConsistencyModelPipeline(**__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= None
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_= """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_= self.get_dummy_components(class_cond=__UpperCAmelCase )
UpperCAmelCase_= ConsistencyModelPipeline(**__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_dummy_inputs(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= None
UpperCAmelCase_= 0
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """class_labels""": 0,
            """generator""": generator,
            """output_type""": """np""",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs["""latents"""] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
        UpperCAmelCase_= UNet2DModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCAmelCase_= CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCAmelCase_= ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_inputs()
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
        UpperCAmelCase_= UNet2DModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCAmelCase_= CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCAmelCase_= ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_inputs()
UpperCAmelCase_= 1
UpperCAmelCase_= None
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    @require_torch_2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        UpperCAmelCase_= UNet2DModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCAmelCase_= CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCAmelCase_= ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    @require_torch_2
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        UpperCAmelCase_= UNet2DModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
UpperCAmelCase_= CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
UpperCAmelCase_= ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
UpperCAmelCase_= pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 593
| 1
|
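The `sdp_kernel` context manager used in the fp16 tests above selects which scaled-dot-product-attention backend torch 2.0 may dispatch to. A minimal sketch of its use, assuming a CUDA device is available; the tensor shapes are arbitrary illustration values.

# Sketch of pinning torch 2.0's attention dispatch to the flash kernel.
import torch
from torch.backends.cuda import sdp_kernel

q = k = v = torch.randn(1, 8, 128, 64, device="cuda", dtype=torch.float16)
with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v)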
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_ddim(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 393
|
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the most significant set bit."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 393
| 1
|
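For instance, 17 is 0b10001, so its most significant set bit sits at (1-indexed) position 5. A short usage sketch against the function as rewritten above:

# Usage sketch for get_highest_set_bit_position defined above.
assert get_highest_set_bit_position(0) == 0    # no bits set
assert get_highest_set_bit_position(1) == 1    # 0b1
assert get_highest_set_bit_position(17) == 5   # 0b10001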
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 528
|
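`reverse_long_words` only reverses words longer than four characters, which is exactly what the printed example demonstrates:

# The sample sentence round-trips as expected: short words are untouched.
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"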
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["""XLA_PYTHON_CLIENT_ALLOCATOR"""] = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0.02 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
__a = initializer_range
def a__ ( self ):
__a = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        __a = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
__a = shift_tokens_right(lowerCamelCase , 1 , 2 )
__a = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase , )
__a = prepare_blenderbot_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
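

# Full model tests: the common Flax tester mixins drive the shape, JIT, and cache
# checks for both the base model and the conditional-generation head.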
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 528
| 1
|
'''simple docstring'''
from math import isclose, sqrt
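
# Project Euler problem 144: count how many times a laser beam reflects inside the
# ellipse 4x^2 + y^2 = 100 before escaping through the small gap at the top.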
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 521
|
'''simple docstring'''
import math
import sys
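
# LZW-style decompressor: read the compressed file as a bit string, strip the size
# prefix, rebuild the lexicon while decoding, then write the raw bytes back out.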
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 521
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
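
# Standard lazy-import pattern: the torch-backed modeling classes are only imported
# on first attribute access, so `import transformers` stays cheap without torch.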
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 519
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
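

# Lightweight config holder: the test class below reads the image sizes and the
# image-processor kwargs under test from this tester.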
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : Optional[int] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
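        expected_words = _UpperCAmelCase  # keep a handle on the OCR words before the name is reused below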
_UpperCAmelCase : str = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
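        expected_boxes = _UpperCAmelCase  # and a handle on the corresponding bounding boxes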
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 506
| 0
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
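

# Joint processor: wraps a FLAVA image processor and a BERT tokenizer behind a single
# __call__ that accepts text, images, or both.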
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=False, max_length=None, stride=0, pad_to_multiple_of=None, return_image_mask=None, return_codebook_pixels=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 102
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
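

# Zero-shot text classification tool built on an NLI checkpoint: each candidate label
# becomes the hypothesis "This example is {label}" scored against the input text.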
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
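
    # Zero-shot trick: pair the same text with one hypothesis per candidate label and
    # let the NLI head decide which pairing is most entailed.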
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors='pt', padding='max_length',
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 102
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 47
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
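

# LED pads the encoder sequence to a multiple of the local attention window, so the
# tester tracks `encoder_seq_length` separately from `seq_length`.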
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates)
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(inputs_dict["attention_mask"])[:, :-1], tf.ones_like(inputs_dict["attention_mask"])[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 179
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
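

# Max segment tree with lazy propagation: `update` assigns a value over a range and
# `query` returns the range maximum, both in O(log n) per call (1-based indices).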
class SegmentTree:
    def __init__(self, size: int):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 716
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
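

# Pipeline-level smoke tests for video classification; a tiny random VideoMAE
# checkpoint keeps the PyTorch test fast while still exercising pre/post-processing.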
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset'
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10}
        )
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 91
| 0
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between two prompt lines and return it with its start/end indices.
    """
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return ''.join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
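
# camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]; used below to strip
# trailing words from an object name until a known model prefix is found.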
def camel_case_split(identifier):
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return ' ' * left_indent + text + ' ' * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"

    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict=False ) -> Any:
"""simple docstring"""
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = _find_text_in_file(
filename=os.path.join(SCREAMING_SNAKE_CASE_ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
UpperCAmelCase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a__ : Optional[int] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCAmelCase_ (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , a_ : str = "▁" , a_ : bool = True , a_ : Union[str, AddedToken] = "<unk>" , a_ : Union[str, AddedToken] = "</s>" , a_ : Union[str, AddedToken] = "<pad>" , )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
UpperCAmelCase_ : Dict = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCAmelCase_ : Optional[int] = token_dict["""token"""]
UpperCAmelCase_ : Tuple = Tokenizer(Unigram() )
UpperCAmelCase_ : Optional[Any] = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
UpperCAmelCase_ : Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=a_ , add_prefix_space=a_ ),
pre_tokenizers.Digits(individual_digits=a_ ),
pre_tokenizers.Punctuation(),
] )
UpperCAmelCase_ : str = decoders.Metaspace(replacement=a_ , add_prefix_space=a_ )
UpperCAmelCase_ : List[Any] = TemplateProcessing(
single=f'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
UpperCAmelCase_ : Dict = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(a_ , a_ )
def a ( self : int , a_ : Union[str, List[str]] , a_ : int = 80_00 , a_ : bool = True , )-> int:
"""simple docstring"""
UpperCAmelCase_ : int = trainers.UnigramTrainer(
vocab_size=a_ , special_tokens=self.special_tokens_list , show_progress=a_ , )
if isinstance(a_ , a_ ):
UpperCAmelCase_ : str = [files]
self._tokenizer.train(a_ , trainer=a_ )
self.add_unk_id()
def a ( self : List[Any] , a_ : Union[Iterator[str], Iterator[Iterator[str]]] , a_ : int = 80_00 , a_ : bool = True , )-> Any:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = trainers.UnigramTrainer(
vocab_size=a_ , special_tokens=self.special_tokens_list , show_progress=a_ , )
self._tokenizer.train_from_iterator(a_ , trainer=a_ )
self.add_unk_id()
def a ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = json.loads(self._tokenizer.to_str() )
UpperCAmelCase_ : Any = self.special_tokens["""unk"""]["""id"""]
UpperCAmelCase_ : Tuple = Tokenizer.from_str(json.dumps(a_ ) )
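# A minimal sketch of training the same kind of Unigram tokenizer with the
# `tokenizers` library directly (the corpus and vocab size here are purely
# illustrative, not part of the class above):
# from tokenizers import Tokenizer, trainers
# from tokenizers.models import Unigram
# tok = Tokenizer(Unigram())
# trainer = trainers.UnigramTrainer(vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"])
# tok.train_from_iterator(["hello world", "hello tokenizers"], trainer=trainer)
# print(tok.encode("hello world").tokens)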
| 470
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int
lowerCamelCase : int
class UpperCAmelCase_ :
def __init__( self : Any , UpperCAmelCase__ : int ) -> int:
lowerCAmelCase = [[] for _ in range(UpperCAmelCase__ )]
lowerCAmelCase = size
def __getitem__( self : int , UpperCAmelCase__ : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def __UpperCAmelCase ( self : List[Any] ) -> int:
return self._size
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]:
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(UpperCAmelCase__ , UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int | None:
lowerCAmelCase = deque([start_vertex] )
lowerCAmelCase = [None] * self.size
lowerCAmelCase = 0
while queue:
lowerCAmelCase = queue.popleft()
lowerCAmelCase = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase = current_distance + edge.weight
lowerCAmelCase = distances[edge.destination_vertex]
if (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
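# For reference, a self-contained sketch of the same 0-1 BFS idea with plain
# adjacency lists (names here are illustrative, not the class above): weight-0
# edges go to the front of the deque and weight-1 edges to the back, so
# vertices are popped in nondecreasing distance order.
from collections import deque

def zero_one_bfs(adj, start):
    """adj[u] is a list of (v, w) pairs with w in {0, 1}."""
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist

assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0) == [0, 0, 1]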
| 513
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case ={
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""ChineseCLIPFeatureExtractor"""]
__snake_case =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
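# The `_LazyModule` above defers heavy submodule imports until first attribute
# access. A minimal sketch of the same idea using a PEP 562 module-level
# __getattr__ (the `math`/`sqrt` mapping is purely illustrative):
import importlib

_lazy_attrs = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _lazy_attrs:
        return getattr(importlib.import_module(_lazy_attrs[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

assert __getattr__("sqrt")(9.0) == 3.0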
| 513
| 1
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( a__ ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> List[str]:
super().__init__()
UpperCamelCase : str = value_function
UpperCamelCase : Tuple = unet
UpperCamelCase : Dict = scheduler
UpperCamelCase : Dict = env
UpperCamelCase : List[str] = env.get_dataset()
UpperCamelCase : Optional[int] = {}
for key in self.data.keys():
try:
UpperCamelCase : Dict = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase : Optional[Any] = {}
for key in self.data.keys():
try:
UpperCamelCase : Tuple = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase : Optional[int] = env.observation_space.shape[0]
UpperCamelCase : List[str] = env.action_space.shape[0]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str:
return (x_in - self.means[key]) / self.stds[key]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
return x_in * self.stds[key] + self.means[key]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
if type(SCREAMING_SNAKE_CASE_ ) is dict:
return {k: self.to_torch(SCREAMING_SNAKE_CASE_ ) for k, v in x_in.items()}
elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ):
return x_in.to(self.unet.device )
return torch.tensor(SCREAMING_SNAKE_CASE_, device=self.unet.device )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
for key, val in cond.items():
UpperCamelCase : List[Any] = val.clone()
return x_in
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : str = x.shape[0]
UpperCamelCase : str = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase : Optional[Any] = torch.full((batch_size,), SCREAMING_SNAKE_CASE_, device=self.unet.device, dtype=torch.long )
for _ in range(SCREAMING_SNAKE_CASE_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase : List[str] = self.value_function(x.permute(0, 2, 1 ), SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase : Optional[int] = torch.autograd.grad([y.sum()], [x] )[0]
UpperCamelCase : Optional[int] = self.scheduler._get_variance(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.exp(0.5 * posterior_variance )
UpperCamelCase : Optional[Any] = model_std * grad
UpperCamelCase : List[Any] = 0
UpperCamelCase : str = x.detach()
UpperCamelCase : Dict = x + scale * grad
UpperCamelCase : Optional[int] = self.reset_xa(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.action_dim )
UpperCamelCase : Dict = self.unet(x.permute(0, 2, 1 ), SCREAMING_SNAKE_CASE_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, predict_epsilon=SCREAMING_SNAKE_CASE_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
UpperCamelCase : str = self.reset_xa(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.action_dim )
UpperCamelCase : int = self.to_torch(SCREAMING_SNAKE_CASE_ )
return x, y
def __call__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.1 ) -> Dict:
# normalize the observations and create batch dimension
UpperCamelCase : Union[str, Any] = self.normalize(SCREAMING_SNAKE_CASE_, 'observations' )
UpperCamelCase : int = obs[None].repeat(SCREAMING_SNAKE_CASE_, axis=0 )
UpperCamelCase : List[str] = {0: self.to_torch(SCREAMING_SNAKE_CASE_ )}
UpperCamelCase : str = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase : Union[str, Any] = randn_tensor(SCREAMING_SNAKE_CASE_, device=self.unet.device )
UpperCamelCase : Dict = self.reset_xa(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.action_dim )
UpperCamelCase : List[Any] = self.to_torch(SCREAMING_SNAKE_CASE_ )
# run the diffusion process
UpperCamelCase , UpperCamelCase = self.run_diffusion(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# sort output trajectories by value
UpperCamelCase : Union[str, Any] = y.argsort(0, descending=SCREAMING_SNAKE_CASE_ ).squeeze()
UpperCamelCase : Union[str, Any] = x[sorted_idx]
UpperCamelCase : List[Any] = sorted_values[:, :, : self.action_dim]
UpperCamelCase : Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase : Union[str, Any] = self.de_normalize(SCREAMING_SNAKE_CASE_, key='actions' )
# select the action with the highest value
if y is not None:
UpperCamelCase : List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase : List[Any] = np.random.randint(0, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = denorm_actions[selected_index, 0]
return denorm_actions
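# The core of the value guidance above is a plain gradient ascent step on the
# value model's output; a minimal self-contained sketch (the value function
# and scale below are illustrative, not the pipeline's actual model):
import torch

def guidance_step(x: torch.Tensor, value_fn, scale: float = 0.1) -> torch.Tensor:
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        y = value_fn(x).sum()
        grad = torch.autograd.grad(y, x)[0]
    # Nudge the sample toward higher predicted value.
    return (x + scale * grad).detach()

x = torch.zeros(2, 4)
x = guidance_step(x, lambda t: (t * 2).sum(dim=-1))  # moves x toward higher value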
| 40
|
def _SCREAMING_SNAKE_CASE ( a ) -> list:
if len(a ) <= 1:
return lst
__A : Any = 1
while i < len(a ):
if lst[i - 1] <= lst[i]:
i += 1
else:
__A , __A = lst[i], lst[i - 1]
i -= 1
if i == 0:
__A : Optional[int] = 1
return lst
if __name__ == "__main__":
UpperCAmelCase : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase : Optional[int] = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
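# A quick sanity check for the gnome sort above (hypothetical: it assumes the
# sorting function is exposed as `gnome_sort`, as the CLI entry point implies):
# assert gnome_sort([3, 1, 2]) == [1, 2, 3]
# assert gnome_sort([5, 5, 1]) == [1, 5, 5]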
| 239
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
snake_case_ : List[str] = random.Random()
def __snake_case ( _UpperCAmelCase : str, _UpperCAmelCase : str=1.0, _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : Dict=None):
if rng is None:
UpperCamelCase = global_rng
UpperCamelCase = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
@require_torchaudio
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=2_4 , lowerCamelCase__=2_4 , lowerCamelCase__=0.0 , lowerCamelCase__=1_6_0_0_0 , lowerCamelCase__=True , lowerCamelCase__=True , ):
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = min_seq_length
UpperCamelCase = max_seq_length
UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase = feature_size
UpperCamelCase = num_mel_bins
UpperCamelCase = padding_value
UpperCamelCase = sampling_rate
UpperCamelCase = return_attention_mask
UpperCamelCase = do_normalize
def UpperCAmelCase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( snake_case_, unittest.TestCase ):
'''simple docstring'''
_snake_case = SpeechaTextFeatureExtractor if is_speech_available() else None
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = SpeechaTextFeatureExtractionTester(self )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCamelCase__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase = feature_extractor(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
UpperCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
UpperCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCamelCase = np.asarray(lowerCamelCase__ )
UpperCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
UpperCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCamelCase = [None, 1_6, None]
for max_length, padding in zip(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase = feature_extractor(
lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = [np.sum(lowerCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCamelCase = [None, 1_6, None]
for max_length, padding in zip(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase = feature_extractor(
lowerCamelCase__ , max_length=lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors='''np''' , return_attention_mask=lowerCamelCase__ )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = [np.sum(lowerCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = feature_extractor(
lowerCamelCase__ , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase__ , return_tensors='''np''' , return_attention_mask=lowerCamelCase__ , )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = feature_extractor(
lowerCamelCase__ , padding='''longest''' , max_length=4 , truncation=lowerCamelCase__ , return_tensors='''np''' , return_attention_mask=lowerCamelCase__ , )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4) )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase = feature_extractor(
lowerCamelCase__ , padding='''longest''' , max_length=1_6 , truncation=lowerCamelCase__ , return_tensors='''np''' , return_attention_mask=lowerCamelCase__ , )
UpperCamelCase = inputs.input_features
UpperCamelCase = inputs.attention_mask
UpperCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 2_4) )
def UpperCAmelCase ( self ):
'''simple docstring'''
import torch
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.float32 )
UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
from datasets import load_dataset
UpperCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCAmelCase ( self ):
'''simple docstring'''
# fmt: off
UpperCamelCase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
UpperCamelCase = self._load_datasamples(1 )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 5_8_4, 2_4) )
self.assertTrue(np.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
| 350
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = ['''image_processor''', '''tokenizer''']
_snake_case = '''BlipImageProcessor'''
_snake_case = '''AutoTokenizer'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
# add QFormer tokenizer
UpperCamelCase = qformer_tokenizer
def __call__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = True , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
UpperCamelCase = BatchFeature()
if text is not None:
UpperCamelCase = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
encoding.update(lowerCamelCase__ )
UpperCamelCase = self.qformer_tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCamelCase = qformer_text_encoding.pop('''input_ids''' )
UpperCamelCase = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
UpperCamelCase = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
encoding.update(lowerCamelCase__ )
return encoding
def UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase ( self , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
if os.path.isfile(lowerCamelCase__ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
UpperCamelCase = os.path.join(lowerCamelCase__ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(lowerCamelCase__ )
return super().save_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def UpperCAmelCase ( cls , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , subfolder='''qformer_tokenizer''' )
UpperCamelCase = cls._get_arguments_from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
args.append(lowerCamelCase__ )
return cls(*lowerCamelCase__ )
| 350
| 1
|
"""simple docstring"""
import math
import sys
def __magic_name__ ( _lowerCamelCase : str ):
__a : Optional[int] = """"""
try:
with open(_lowerCamelCase , """rb""" ) as binary_file:
__a : Union[str, Any] = binary_file.read()
for dat in data:
__a : Union[str, Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def __magic_name__ ( _lowerCamelCase : str ):
__a : Tuple = {"""0""": """0""", """1""": """1"""}
__a , __a = """""", """"""
__a : Dict = len(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__a : str = lexicon[curr_string]
result += last_match_id
__a : Union[str, Any] = last_match_id + """0"""
if math.log2(_lowerCamelCase ).is_integer():
__a : List[Any] = {}
for curr_key in list(_lowerCamelCase ):
__a : Union[str, Any] = lexicon.pop(_lowerCamelCase )
__a : Any = new_lex
__a : int = last_match_id + """1"""
index += 1
__a : int = """"""
return result
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : str ):
__a : List[str] = 8
try:
with open(_lowerCamelCase , """wb""" ) as opened_file:
__a : Any = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_lowerCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def __magic_name__ ( _lowerCamelCase : str ):
__a : List[Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__a : Dict = data_bits[counter:]
__a : Any = data_bits[counter + 1 :]
return data_bits
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : str ):
__a : Optional[Any] = read_file_binary(_lowerCamelCase )
__a : str = remove_prefix(_lowerCamelCase )
__a : str = decompress_data(_lowerCamelCase )
write_file_binary(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
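# A small illustration of the bit-string conversion used by the reader above:
# each byte is rendered as a zero-padded 8-bit string and concatenated.
assert f"{ord('A'):08b}" == "01000001"
assert "".join(f"{b:08b}" for b in b"AB") == "0100000101000010"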
| 581
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "imagegpt"
_lowerCAmelCase = ["past_key_values"]
_lowerCAmelCase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , _lowercase=512 + 1 , _lowercase=32 * 32 , _lowercase=512 , _lowercase=24 , _lowercase=8 , _lowercase=None , _lowercase="quick_gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , **_lowercase , ):
'''simple docstring'''
__a : int = vocab_size
__a : Union[str, Any] = n_positions
__a : List[str] = n_embd
__a : Union[str, Any] = n_layer
__a : List[str] = n_head
__a : int = n_inner
__a : Any = activation_function
__a : List[str] = resid_pdrop
__a : str = embd_pdrop
__a : str = attn_pdrop
__a : Tuple = layer_norm_epsilon
__a : str = initializer_range
__a : Dict = scale_attn_weights
__a : Optional[int] = use_cache
__a : Optional[Any] = scale_attn_by_inverse_layer_idx
__a : Optional[Any] = reorder_and_upcast_attn
__a : Union[str, Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def lowerCAmelCase__(self , _lowercase , _lowercase = 1 , _lowercase = -1 , _lowercase = False , _lowercase = None , _lowercase = 3 , _lowercase = 32 , _lowercase = 32 , ):
'''simple docstring'''
__a : Any = self._generate_dummy_images(_lowercase , _lowercase , _lowercase , _lowercase )
__a : Union[str, Any] = dict(preprocessor(images=_lowercase , return_tensors=_lowercase ) )
return inputs
| 581
| 1
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A__ ( *SCREAMING_SNAKE_CASE__) -> Any:
with open(SCREAMING_SNAKE_CASE__ , """r""") as fh:
fcntl.flock(SCREAMING_SNAKE_CASE__ , fcntl.LOCK_EX)
try:
print(*SCREAMING_SNAKE_CASE__)
finally:
fcntl.flock(SCREAMING_SNAKE_CASE__ , fcntl.LOCK_UN)
__UpperCAmelCase : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__UpperCAmelCase : List[str] = torch.device("cuda", local_rank)
__UpperCAmelCase : Any = socket.gethostname()
__UpperCAmelCase : str = f'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__UpperCAmelCase : str = dist.get_rank()
__UpperCAmelCase : Optional[int] = dist.get_world_size()
printflock(f'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(f'{gpu} is broken')
raise
| 155
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __snake_case ( ctypes.Structure ):
'''simple docstring'''
lowerCAmelCase__ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def A__ ( ) -> List[Any]:
if os.name == "nt":
__snake_case: str = CursorInfo()
__snake_case: List[str] = ctypes.windll.kernel32.GetStdHandle(-11)
ctypes.windll.kernel32.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__))
__snake_case: Optional[int] = False
ctypes.windll.kernel32.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__))
elif os.name == "posix":
sys.stdout.write("""\033[?25l""")
sys.stdout.flush()
def A__ ( ) -> List[Any]:
if os.name == "nt":
__snake_case: Dict = CursorInfo()
__snake_case: List[str] = ctypes.windll.kernelaa.GetStdHandle(-11)
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__))
__snake_case: List[str] = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__))
elif os.name == "posix":
sys.stdout.write("""\033[?25h""")
sys.stdout.flush()
@contextmanager
def A__ ( ) -> int:
try:
hide_cursor()
yield
finally:
show_cursor()
| 155
| 1
|
'''simple docstring'''
import sys
import turtle
def __snake_case ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] ):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def __snake_case ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : int , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
_UpperCamelCase : int = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
_UpperCamelCase : List[str] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 396
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_UpperCamelCase : str = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
_UpperCamelCase : int = {'facebook/blenderbot-3B': 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __snake_case ( ):
__UpperCAmelCase = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
__UpperCAmelCase = bs[:]
__UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase = [chr(lowerCAmelCase ) for n in cs]
return dict(zip(lowerCAmelCase , lowerCAmelCase ) )
def __snake_case ( lowerCAmelCase : List[Any] ):
__UpperCAmelCase = set()
__UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase = char
return pairs
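# For example, calling the pair-extraction helper above on the symbol tuple
# ("l", "o", "w") yields the adjacent bigrams used to look up BPE merge ranks:
# {("l", "o"), ("o", "w")} (the helper's public name is obscured in this
# listing, so this call is illustrative).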
class _lowercase( _lowerCamelCase ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self: Union[str, Any] ,a: Tuple ,a: Dict ,a: Dict="replace" ,a: int="<s>" ,a: List[str]="</s>" ,a: Any="</s>" ,a: str="<s>" ,a: Dict="<unk>" ,a: Union[str, Any]="<pad>" ,a: Optional[int]="<mask>" ,a: int=False ,**a: int ,):
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else bos_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else eos_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else sep_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else cls_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else unk_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else mask_token
super().__init__(
errors=a ,bos_token=a ,eos_token=a ,unk_token=a ,sep_token=a ,cls_token=a ,pad_token=a ,mask_token=a ,add_prefix_space=a ,**a ,)
with open(a ,encoding='utf-8' ) as vocab_handle:
__UpperCAmelCase = json.load(a )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = errors # how to handle errors in decoding
__UpperCAmelCase = bytes_to_unicode()
__UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(a ,encoding='utf-8' ) as merges_handle:
__UpperCAmelCase = merges_handle.read().split('\n' )[1:-1]
__UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase = dict(zip(a ,range(len(a ) ) ) )
__UpperCAmelCase = {}
__UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case ( self: Optional[Any] ):
return len(self.encoder )
def snake_case ( self: Optional[Any] ):
return dict(self.encoder ,**self.added_tokens_encoder )
def snake_case ( self: Optional[int] ,a: Optional[int] ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase = tuple(a )
__UpperCAmelCase = get_pairs(a )
if not pairs:
return token
while True:
__UpperCAmelCase = min(a ,key=lambda a : self.bpe_ranks.get(a ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase = bigram
__UpperCAmelCase = []
__UpperCAmelCase = 0
while i < len(a ):
try:
__UpperCAmelCase = word.index(a ,a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase = tuple(a )
__UpperCAmelCase = new_word
if len(a ) == 1:
break
else:
__UpperCAmelCase = get_pairs(a )
__UpperCAmelCase = ' '.join(a )
__UpperCAmelCase = word
return word
def snake_case ( self: int ,a: str ):
__UpperCAmelCase = []
for token in re.findall(self.pat ,a ):
__UpperCAmelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) )
return bpe_tokens
def snake_case ( self: Optional[Any] ,a: Union[str, Any] ):
return self.encoder.get(a ,self.encoder.get(self.unk_token ) )
def snake_case ( self: Any ,a: Union[str, Any] ):
return self.decoder.get(a )
def snake_case ( self: Dict ,a: Union[str, Any] ):
__UpperCAmelCase = ''.join(a )
__UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def snake_case ( self: Optional[Any] ,a: str ,a: Optional[str] = None ):
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase = os.path.join(
a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCAmelCase = os.path.join(
a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(a ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=a ,ensure_ascii=a ) + '\n' )
__UpperCAmelCase = 0
with open(a ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__UpperCAmelCase = token_index
writer.write(' '.join(a ) + '\n' )
index += 1
return vocab_file, merge_file
def snake_case ( self: List[str] ,a: List[int] ,a: Optional[List[int]] = None ,a: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a ,token_ids_a=a ,already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def snake_case ( self: Optional[int] ,a: List[int] ,a: Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self: Dict ,a: List[Any] ,a: Optional[int]=False ,**a: Optional[Any] ):
__UpperCAmelCase = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
__UpperCAmelCase = ' ' + text
return (text, kwargs)
def snake_case ( self: Tuple ,a: List[int] ,a: Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def snake_case ( self: Any ,a: "Conversation" ):
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix a space, as is done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(a )
__UpperCAmelCase = ' '.join(a )
__UpperCAmelCase = self.encode(a )
if len(a ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 396
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : str = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCamelCase__ : List[str] = random.Random()
if is_torch_available():
import torch
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=1.0 , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Optional[int]:
if rng is None:
snake_case__ = global_rng
snake_case__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:List[Any] , _a:Tuple , _a:List[str]=7 , _a:List[str]=4_00 , _a:Any=20_00 , _a:Dict=1 , _a:Any=0.0 , _a:List[str]=1_60_00 , _a:Any=True , _a:Optional[Any]=True , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = min_seq_length
snake_case__ = max_seq_length
snake_case__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case__ = feature_size
snake_case__ = padding_value
snake_case__ = sampling_rate
snake_case__ = return_attention_mask
snake_case__ = do_normalize
def SCREAMING_SNAKE_CASE__ ( self:int ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Union[str, Any]=False , _a:Dict=False ):
def _flatten(_a:Tuple ):
return list(itertools.chain(*_a ) )
if equal_length:
snake_case__ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case__ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = ASTFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = ASTFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self:int ):
# Test that all calls wrap to encode_plus and batch_encode_plus
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
snake_case__ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case__ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
snake_case__ = feat_extract(_a , padding=_a , return_tensors='''np''' ).input_values
snake_case__ = feat_extract(_a , padding=_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case__ = np.asarray(_a )
snake_case__ = feat_extract(_a , return_tensors='''np''' ).input_values
snake_case__ = feat_extract(_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
import torch
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ = np.random.rand(1_00 ).astype(np.float32 )
snake_case__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
snake_case__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple ):
from datasets import load_dataset
snake_case__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
snake_case__ = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self:int ):
# fmt: off
snake_case__ = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
snake_case__ = self._load_datasamples(1 )
snake_case__ = ASTFeatureExtractor()
snake_case__ = feature_extractor(_a , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _a , atol=1e-4 ) )
| 208
| 0
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger("transformers.models.speecht5")
def _snake_case ( __snake_case , __snake_case , __snake_case ):
hf_model.apply_weight_norm()
_UpperCamelCase = checkpoint['''input_conv.weight_g''']
_UpperCamelCase = checkpoint['''input_conv.weight_v''']
_UpperCamelCase = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
_UpperCamelCase = checkpoint[f"""upsamples.{i}.1.weight_g"""]
_UpperCamelCase = checkpoint[f"""upsamples.{i}.1.weight_v"""]
_UpperCamelCase = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_UpperCamelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
_UpperCamelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
_UpperCamelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
_UpperCamelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
_UpperCamelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
_UpperCamelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
_UpperCamelCase = checkpoint['''output_conv.1.weight_g''']
_UpperCamelCase = checkpoint['''output_conv.1.weight_v''']
_UpperCamelCase = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , ):
if config_path is not None:
_UpperCamelCase = SpeechTaHifiGanConfig.from_pretrained(__snake_case )
else:
_UpperCamelCase = SpeechTaHifiGanConfig()
_UpperCamelCase = SpeechTaHifiGan(__snake_case )
_UpperCamelCase = torch.load(__snake_case )
load_weights(orig_checkpoint['''model''']['''generator'''] , __snake_case , __snake_case )
_UpperCamelCase = np.load(__snake_case )
_UpperCamelCase = stats[0].reshape(-1 )
_UpperCamelCase = stats[1].reshape(-1 )
_UpperCamelCase = torch.from_numpy(__snake_case ).float()
_UpperCamelCase = torch.from_numpy(__snake_case ).float()
model.save_pretrained(__snake_case )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowerCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
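# Example invocation of the conversion script above (script name and all paths
# are hypothetical):
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-username/speecht5_hifigan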
| 10
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 626
| 0
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    # classic fourth-order Runge-Kutta: integrate y' = f(x, y) on [xa, x_end]
    # with step size h and initial value y(xa) = ya; returns the array of y values
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
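# Minimal usage sketch for the solver above (the parameter order f, ya, xa, h,
# x_end is reconstructed from the body and should be treated as an assumption):
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])  # ~2.7183, approximating e for dy/dx = y, y(0) = 1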
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
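# Usage note (sketch): the shim behaves exactly like MobileViTImageProcessor but
# emits a FutureWarning on construction, e.g.:
#   feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")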
| 257
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 223
|
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
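# With the edges above, the two queries print 11 (shortest path 1 -> 3 -> 4,
# cost 5 + 6) and 16 (shortest path 0 -> 2 -> 3, cost 9 + 7).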
| 223
| 1
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 704
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # upsample the low-resolution depth map back to the original image size
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
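# Minimal usage sketch for this pipeline (the "Intel/dpt-large" checkpoint is an
# assumption, not referenced by this file):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor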
| 176
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `SCREAMING_SNAKE_CASE_` is the expected-encoding dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_and_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
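# Minimal translation sketch using the tokenizer exercised above (the companion
# M2M100ForConditionalGeneration model is an assumption, not part of this test file):
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   encoded = tokenizer("Hello world", return_tensors="pt")
#   generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))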
| 216
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 216
| 1
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
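# Inference sketch built on the integration tests above (pipeline support for this
# checkpoint is suggested by the pipeline_model_mapping earlier, but is an
# assumption rather than something asserted by this file):
#   from transformers import pipeline
#   segmenter = pipeline("image-segmentation", model="microsoft/beit-base-finetuned-ade-640-640")
#   masks = segmenter("./tests/fixtures/tests_samples/COCO/000000039769.png")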
| 721
|
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
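# Example invocation (script name and checkpoint path are hypothetical):
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224 \
#       --push_to_hub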
| 439
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_van'''] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 119
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
UpperCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class lowerCAmelCase ( PreTrainedTokenizer ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ["input_ids", "attention_mask"]
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self : Dict , __lowercase : List[str] , __lowercase : int="<s>" , __lowercase : Any="</s>" , __lowercase : Tuple="</s>" , __lowercase : Dict="<s>" , __lowercase : Dict="<unk>" , __lowercase : List[Any]="<pad>" , __lowercase : Dict="<mask>" , __lowercase : Tuple=None , __lowercase : List[str]=None , __lowercase : Union[str, Any]=None , __lowercase : Optional[Dict[str, Any]] = None , __lowercase : List[str]=None , __lowercase : List[str]=False , **__lowercase : Union[str, Any] , ):
"""simple docstring"""
__lowercase =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
__lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
__lowercase =legacy_behaviour
super().__init__(
bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , tokenizer_file=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__lowercase , **__lowercase , )
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowercase ) )
__lowercase =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__lowercase ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowercase =1
__lowercase =len(self.sp_model )
__lowercase ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowercase )
}
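# Example of the offset arithmetic above (assuming a toy SPM model with 10 pieces and
# fairseq_offset == 1): the first language code gets id 10 + 0 + 1 == 11, the second 12,
# and so on, so language codes line up directly after the shifted SPM vocabulary.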
__lowercase ={v: k for k, v in self.lang_code_to_id.items()}
__lowercase =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowercase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowercase =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__lowercase =src_lang if src_lang is not None else 'eng_Latn'
__lowercase =self.lang_code_to_id[self._src_lang]
__lowercase =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple ):
"""simple docstring"""
__lowercase =self.__dict__.copy()
__lowercase =None
__lowercase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , __lowercase : List[Any] ):
"""simple docstring"""
__lowercase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase ={}
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case ( self : Dict ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case ( self : Tuple ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def snake_case ( self : Optional[Any] , __lowercase : str ):
"""simple docstring"""
__lowercase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
    token_ids_0=__lowercase , token_ids_1=__lowercase , already_has_special_tokens=__lowercase )
__lowercase =[1] * len(self.prefix_tokens )
__lowercase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowercase )) + suffix_ones
return prefix_ones + ([0] * len(__lowercase )) + ([0] * len(__lowercase )) + suffix_ones
def snake_case ( self : Optional[Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case ( self : int , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
"""simple docstring"""
__lowercase =[self.sep_token_id]
__lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self : Optional[Any] , __lowercase : Dict , __lowercase : str , __lowercase : Optional[str] , __lowercase : Optional[str] , **__lowercase : Optional[int] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__lowercase =src_lang
__lowercase =self(__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase )
__lowercase =self.convert_tokens_to_ids(__lowercase )
__lowercase =tgt_lang_id
return inputs
def snake_case ( self : Any ):
"""simple docstring"""
__lowercase ={self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Any , __lowercase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowercase , out_type=__lowercase )
def snake_case ( self : int , __lowercase : int ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase =self.sp_model.PieceToId(__lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
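# Example: SentencePiece returns 0 for pieces outside its vocabulary, so a miss falls
# through to self.unk_token_id here instead of being shifted by fairseq_offset.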
def snake_case ( self : int , __lowercase : Any ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case ( self : str , __lowercase : List[str] ):
"""simple docstring"""
__lowercase =''.join(__lowercase ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def snake_case ( self : Optional[Any] , __lowercase : str , __lowercase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowercase =os.path.join(
__lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , 'wb' ) as fi:
__lowercase =self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
def snake_case ( self : List[str] , __lowercase : List[str] , __lowercase : str = "eng_Latn" , __lowercase : Optional[List[str]] = None , __lowercase : str = "fra_Latn" , **__lowercase : List[str] , ):
"""simple docstring"""
__lowercase =src_lang
__lowercase =tgt_lang
return super().prepare_seq2seq_batch(__lowercase , __lowercase , **__lowercase )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case ( self : Optional[Any] , __lowercase : List[str] ):
"""simple docstring"""
__lowercase =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__lowercase =[]
__lowercase =[self.eos_token_id, self.cur_lang_code]
else:
__lowercase =[self.cur_lang_code]
__lowercase =[self.eos_token_id]
def snake_case ( self : Union[str, Any] , __lowercase : str ):
"""simple docstring"""
__lowercase =self.lang_code_to_id[lang]
if self.legacy_behaviour:
__lowercase =[]
__lowercase =[self.eos_token_id, self.cur_lang_code]
else:
__lowercase =[self.cur_lang_code]
__lowercase =[self.eos_token_id]
| 119
| 1
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_lowerCamelCase , **_lowerCamelCase ) -> Any:
pass
@is_pipeline_test
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCAmelCase_ ( self , model , tokenizer , processor ) -> Any:
    object_detector = pipeline(
        """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
    examples = [
        {
            """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
            """candidate_labels""": ["""cat""", """remote""", """couch"""],
        }
    ]
    return object_detector, examples
def UpperCAmelCase_ ( self , object_detector , examples ) -> Optional[int]:
    outputs = object_detector(examples[0] , threshold=0.0 )
    n = len(outputs )
    self.assertGreater(n , 0 )
    self.assertEqual(
        outputs , [
            {
                """score""": ANY(float ),
                """label""": ANY(str ),
                """box""": {"""xmin""": ANY(int ), """ymin""": ANY(int ), """xmax""": ANY(int ), """ymax""": ANY(int )},
            }
            for i in range(n )
        ] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@require_torch
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Union[str, Any] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
A_ : Union[str, Any] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
A_ : List[Any] = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Tuple = pipeline("""zero-shot-object-detection""" )
A_ : str = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
A_ : List[Any] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
@require_torch
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : int = 0.2
A_ : Optional[int] = pipeline("""zero-shot-object-detection""" )
A_ : Union[str, Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : int = 2
A_ : Optional[Any] = pipeline("""zero-shot-object-detection""" )
A_ : str = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 385
|
'''simple docstring'''
from manim import *
class _lowerCAmelCase ( Scene ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
A_ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Optional[Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = Text("""CPU""" , font_size=24 )
A_ : Any = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
A_ : Tuple = [mem.copy() for i in range(4 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Union[str, Any] = Text("""GPU""" , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCamelCase )
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : List[Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : str = Text("""Model""" , font_size=24 )
A_ : Any = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCamelCase )
A_ : List[Any] = []
A_ : str = []
for i, rect in enumerate(_lowerCamelCase ):
A_ : Dict = fill.copy().set_fill(_lowerCamelCase , opacity=0.8 )
target.move_to(_lowerCamelCase )
model_arr.append(_lowerCamelCase )
A_ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCamelCase )
self.add(*_lowerCamelCase , *_lowerCamelCase )
A_ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
A_ : Tuple = [meta_mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Dict = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Union[str, Any] = Text("""Disk""" , font_size=24 )
A_ : Union[str, Any] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
A_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Union[str, Any] = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCamelCase )
A_ : List[str] = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase ) )
A_ : Optional[int] = Square(0.3 )
input.set_fill(_lowerCamelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowerCamelCase , buff=0.5 )
self.play(Write(_lowerCamelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowerCamelCase , buff=0.02 )
self.play(MoveToTarget(_lowerCamelCase ) )
self.play(FadeOut(_lowerCamelCase ) )
A_ : Optional[int] = Arrow(start=_lowerCamelCase , end=_lowerCamelCase , color=_lowerCamelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowerCamelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
A_ : Union[str, Any] = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=3 ) )
A_ : Any = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(_lowerCamelCase ) , Circumscribe(model_arr[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(model_cpu_arr[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
A_ : Tuple = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowerCamelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
A_ : List[str] = AnimationGroup(
FadeOut(_lowerCamelCase , run_time=0.5 ) , MoveToTarget(_lowerCamelCase , run_time=0.5 ) , FadeIn(_lowerCamelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowerCamelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
A_ : Any = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[i] , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(model_arr[i + 1] , color=_lowerCamelCase , **_lowerCamelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
A_ : Any = a_c
A_ : Dict = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowerCamelCase ) , FadeOut(_lowerCamelCase , run_time=0.5 ) , )
A_ : Tuple = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=3 ) , MoveToTarget(_lowerCamelCase ) )
self.wait()
| 385
| 1
|
'''simple docstring'''
class Things:
'''simple docstring'''
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu( name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( item , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
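# Usage sketch for the helpers above (hypothetical menu data):
#   menu = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 10, 20])
#   chosen, total_value = greedy(menu, 60, Things.get_value)
# greedy() keeps taking the highest-keyed item that still fits under max_cost.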
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig( PretrainedConfig ):
"""simple docstring"""
lowerCamelCase = """marian"""
lowerCamelCase = ["""past_key_values"""]
lowerCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_8101 , decoder_vocab_size=None , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , activation_dropout=0.0 , attention_dropout=0.0 , init_std=0.02 , decoder_start_token_id=5_8100 , scale_embedding=False , pad_token_id=5_8100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ) -> int:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
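# Usage sketch: MarianConfig() builds a config with the defaults above (58,101-token
# vocab, d_model 1024, 12-layer encoder and decoder); any field can be overridden by
# keyword, e.g. MarianConfig(encoder_layers=6, decoder_layers=6).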
class MarianOnnxConfig( OnnxSeqaSeqConfigWithPast ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'past_key_values.{i}.key'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[f'past_key_values.{i}.value'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
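# Example: with task="default" and use_past=False, the property above yields dynamic
# axes such as {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, ...,
# 'decoder_input_ids': {0: 'batch', 1: 'decoder_sequence'}} for the ONNX exporter.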
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'present.{i}.key'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[f'present.{i}.value'] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
snake_case : Optional[int] = seq_length if not self.use_past else 1
snake_case : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : Dict = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
snake_case : Any = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case ,snake_case : Dict = common_inputs['''input_ids'''].shape
snake_case : Any = common_inputs['''decoder_input_ids'''].shape[1]
snake_case ,snake_case : Tuple = self.num_attention_heads
snake_case : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Union[str, Any] = decoder_seq_length + 3
snake_case : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case : Optional[int] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
snake_case : Dict = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case ,snake_case : str = self.num_layers
snake_case : Union[str, Any] = min(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Union[str, Any] = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
snake_case : str = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
snake_case : Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
snake_case : str = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case ,snake_case : Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case : int = seqlen + 2
snake_case ,snake_case : int = self.num_layers
snake_case ,snake_case : List[Any] = self.num_attention_heads
snake_case : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Tuple = common_inputs['''attention_mask'''].dtype
snake_case : Optional[int] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
snake_case : str = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
snake_case : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : List[str] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
snake_case : str = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
snake_case : str = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case : Union[str, Any] = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
snake_case : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
snake_case : Tuple = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
snake_case : List[Any] = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
snake_case : Optional[int] = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> float:
"""simple docstring"""
return 1e-4
| 638
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name ):
    config = SwinConfig(image_size=1_92 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("""Model not supported, only supports base and large variants""" )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name ):
    if "encoder.mask_token" in name:
        name = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
    if "encoder.patch_embed.proj" in name:
        name = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "encoder.patch_embed.norm" in name:
        name = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if name == "encoder.norm.weight":
        name = """layernorm.weight"""
    if name == "encoder.norm.bias":
        name = """layernorm.bias"""
    if "decoder" in name:
        pass
    else:
        name = """swin.""" + name
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
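# Layout note for the split above: the original checkpoint stores the query, key and
# value projections stacked into a single (3 * dim, dim) "qkv" matrix, so rows [0:dim]
# are the query, [dim:2*dim] the key, and [-dim:] the value weights (same for biases).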
def convert_swin_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image_processor = ViTImageProcessor(size={"""height""": 1_92, """width""": 1_92} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 519
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip_2'''] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 519
| 1
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self : str , lowerCAmelCase : Any , lowerCAmelCase : int=2 , lowerCAmelCase : str=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : str=7 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=99 , lowerCAmelCase : List[Any]=36 , lowerCAmelCase : str=2 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : List[str]=37 , lowerCAmelCase : str="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Optional[int]=512 , lowerCAmelCase : Any=16 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Optional[int]=6 , lowerCAmelCase : int=6 , lowerCAmelCase : str=3 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Dict=None , lowerCAmelCase : Dict=1000 , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = coordinate_size
UpperCAmelCase = shape_size
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase = text_seq_length
UpperCAmelCase = (image_size // patch_size) ** 2 + 1
UpperCAmelCase = self.text_seq_length + self.image_seq_length
def a__( self : str )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
    tmp_coordinate = bbox[i, j, 3]
    bbox[i, j, 3] = bbox[i, j, 1]
    bbox[i, j, 1] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
    tmp_coordinate = bbox[i, j, 2]
    bbox[i, j, 2] = bbox[i, j, 0]
    bbox[i, j, 0] = tmp_coordinate
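# Example: a randomly drawn box (x0=5, y0=9, x1=2, y1=1) is swapped above into
# (2, 1, 5, 9), so every bbox satisfies x0 <= x1 and y0 <= y1 as the model expects.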
UpperCAmelCase = tf.constant(lowerCAmelCase )
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE: the mixin base classes, the model-mapping constants used in
    # _prepare_for_class below, and the three test_* flags are restored from the
    # standard transformers TF test conventions; the original names were lost.
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Multiple-choice models expect every tensor duplicated across a
            # num_choices dimension, hence the tile over an expanded axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that the model correctly computes the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
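
    # test_loss_computation above exercises every calling convention the TF
    # models accept when computing a loss: keyword arguments, partially masked
    # labels, a single input dict, and a positional tuple.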
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
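
# This COCO fixture (000000039769.png, the two cats on a couch) is the standard
# image used across the transformers vision integration tests.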
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
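
    # _sanitize_parameters routes user kwargs to the three pipeline phases:
    # preprocess (tokenization), forward (model.generate), and postprocess
    # (decoding the generated ids back to text).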
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
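
    # Example: task="translation_en_to_fr" splits into ["translation", "en",
    # "to", "fr"], yielding src_lang="en" and tgt_lang="fr".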
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )
        torch.manual_seed(0)
        # NOTE: clip_sample=True is an assumption restored from the usual
        # tiny-model prior-scheduler setup; the original flag did not survive.
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        # NOTE: upcast_attention=True / use_linear_projection=True are
        # assumptions restored from the usual tiny-UNet test setup.
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
return components
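
    # StableUnCLIP chains two stages -- a CLIP prior that maps text to an image
    # embedding, then a denoising UNet conditioned on a noised version of that
    # embedding -- which is why the components include two tokenizers, two text
    # encoders and several schedulers.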
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
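
# Each window shift updates text_hash in O(1): subtract the leading character's
# ord(...) * modulus_power contribution, multiply by the base, and add the
# incoming character -- giving an expected O(p_len + t_len) running time.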
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
return output
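
    # to_dict serializes the nested encoder/decoder configs plus the composite
    # model_type, so from_dict / from_pretrained can rebuild the pair.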
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        # NOTE: clip_sample=False / set_alpha_to_one=False are restored from the
        # usual tiny-model scheduler setup; the original flags were lost.
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
return components
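
    # Compared with the image Stable Diffusion component stack, only the UNet
    # differs: the 3D blocks add a temporal attention/convolution path across
    # the frame dimension.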
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
'''simple docstring'''
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
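
# Note: fairseq's bpecodes lines end with a merge-frequency count; the HF
# merges.txt format keeps only the merge pairs, which is why the conversion
# strips the trailing numbers with re.sub(r" \d+$", ...).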
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
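
    # Like BART/RoBERTa, MVP does not use token type ids: the returned mask is
    # all zeros whether a single sequence or a pair is encoded.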
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _snake_case ( A_ : str = "dhaka" , A_ : int = 5 ):
"""simple docstring"""
a_ : Any = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
a_ : Optional[int] = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
a_ : int = requests.get("""https://www.google.com/search""" , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = BeautifulSoup(html.text , """html.parser""" )
a_ : Optional[int] = "".join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
a_ : List[str] = json.dumps(_SCREAMING_SNAKE_CASE )
a_ : Optional[int] = json.loads(_SCREAMING_SNAKE_CASE )
a_ : Dict = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
a_ : Optional[Any] = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(_SCREAMING_SNAKE_CASE ) , )
a_ : Dict = re.findall(
R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
a_ : str = bytes(_SCREAMING_SNAKE_CASE , """ascii""" ).decode(
"""unicode-escape""" )
a_ : Any = bytes(_SCREAMING_SNAKE_CASE , """ascii""" ).decode(
"""unicode-escape""" )
a_ : int = urllib.request.build_opener()
a_ : Dict = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = f'''query_{query.replace(' ' , '_' )}'''
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , f'''{path_name}/original_size_img_{index}.jpg''' )
return index
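
# NOTE: this scrapes Google's AF_initDataCallback payload directly, so the
# regexes above are brittle -- they stop matching whenever Google changes the
# markup of its image-search results.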
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("Please provide a search term.")
raise
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
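
# make_batched normalizes every accepted input shape to List[List[image]]:
# a batch of videos, each of which is a list of frames.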
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
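# A minimal usage sketch for the video processor above (class name kept as in this
# snippet; the frame count, input resolution, and output shape below are
# illustrative assumptions, not taken from the source). `preprocess` resizes,
# center-crops, rescales, and normalizes every frame of every clip:
#
#     import numpy as np
#     processor = _snake_case()
#     video = [np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#              for _ in range(16)]                     # one clip of 16 RGB frames
#     batch = processor.preprocess(video, return_tensors="np")
#     batch["pixel_values"].shape                      # (1, 16, 3, 224, 224)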
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class InputExample :
    """simple docstring"""
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures :
    """simple docstring"""
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split( Enum ):
    """simple docstring"""
    train = 'train'
    dev = 'dev'
    test = 'test'
class TokenClassificationTask :
"""simple docstring"""
@staticmethod
    def read_examples_from_file ( data_dir , mode ) -> List[InputExample]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
    def get_labels ( path ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
    def convert_examples_to_features ( examples , label_list , max_seq_length , tokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
        '''simple docstring'''
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d" , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***" )
                logger.info("guid: %s" , example.guid )
                logger.info("tokens: %s" , " ".join([str(x ) for x in tokens] ) )
                logger.info("input_ids: %s" , " ".join([str(x ) for x in input_ids] ) )
                logger.info("input_mask: %s" , " ".join([str(x ) for x in input_mask] ) )
                logger.info("segment_ids: %s" , " ".join([str(x ) for x in segment_ids] ) )
                logger.info("label_ids: %s" , " ".join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset( Dataset ):
        """simple docstring"""
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__( self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) -> Optional[int]:
            '''simple docstring'''
            # Load data features from cache, or build them from the dataset file.
            cached_features_file = os.path.join(
                data_dir , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F'Loading features from cached file {cached_features_file}' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F'Creating features from dataset file at {data_dir}' )
                    examples = token_classification_task.read_examples_from_file(data_dir , mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=bool(model_type in ["roberta"] ) , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F'Saving features into cached file {cached_features_file}' )
                    torch.save(self.features , cached_features_file )
def __len__( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
        def __getitem__( self , i ) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset :
        """simple docstring"""
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        def __init__( self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) -> List[Any]:
            '''simple docstring'''
            examples = token_classification_task.read_examples_from_file(data_dir , mode )
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=bool(model_type in ["roberta"] ) , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset( self ) -> List[Any]:
            '''simple docstring'''
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset
def __len__( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.features )
        def __getitem__( self , i ) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
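# A minimal usage sketch for the PyTorch dataset above; the `NER` task class,
# the data path, and the label set are illustrative assumptions only:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     dataset = TokenClassificationDataset(
#         token_classification_task=NER(),   # hypothetical TokenClassificationTask subclass
#         data_dir="./data",
#         tokenizer=tokenizer,
#         labels=["O", "B-ORG", "I-ORG", "B-LOC", "I-LOC"],
#         model_type="bert",
#         max_seq_length=128,
#     )
#     dataset[0].input_ids[:5]               # first wordpiece ids of the first example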
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments :
"""simple docstring"""
    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'} , )
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization.'
                'Don\'t set if you want to train an encoder model from scratch.'
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization.'
                'Don\'t set if you want to train a decoder model from scratch.'
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def main() -> List[Any]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
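# The script above is meant to be run once to materialize an (untrained)
# vision-encoder/text-decoder checkpoint; afterwards it can be reloaded
# directly. A sketch with an illustrative output path:
#
#     from transformers import FlaxVisionEncoderDecoderModel
#     model = FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2")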
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a_ : Tuple = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMvaConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''layoutlmv3'''
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_0_2_4 , coordinate_size=1_2_8 , shape_size=1_2_8 , has_relative_attention_bias=True , rel_pos_bins=3_2 , max_rel_pos=1_2_8 , rel_2d_pos_bins=6_4 , max_rel_2d_pos=2_5_6 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=2_2_4 , num_channels=3 , patch_size=1_6 , classifier_dropout=None , **kwargs , ) -> Optional[int]:
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig ( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.12''' )
@property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
    def atol_for_validation ( self ) -> float:
return 1e-5
@property
    def default_onnx_opset ( self ) -> int:
return 1_2
    def generate_dummy_inputs ( self , processor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 4_0 , image_height = 4_0 , ) -> Mapping[str, Any]:
        setattr(processor.image_processor , '''apply_ocr''' , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
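# A small sketch of how the ONNX config above is typically consumed, assuming
# the two classes map onto LayoutLMv3Config / LayoutLMv3OnnxConfig in
# `transformers` (class names below follow this file's spelling):
#
#     config = LayoutLMvaConfig()
#     onnx_config = LayoutLMvaOnnxConfig(config, task="question-answering")
#     list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'bbox', 'pixel_values']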
MOD_ADLER = 6_55_21
def adler32 ( plain_text ):
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
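# Quick sanity check: Python's zlib ships the same checksum, so the two
# implementations should agree on any string ("Wikipedia" is the classic
# worked example, with checksum 0x11E60398 = 300286872).
if __name__ == "__main__":
    import zlib
    assert adler32("Wikipedia" ) == zlib.adler32(b"Wikipedia" ) == 300286872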
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute ( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        '''simple docstring'''
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort ( list_of_ints : list[int] ) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
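    # Worked example: one bucketing pass per decimal digit, least significant first.
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # -> [2, 24, 45, 66, 75, 90, 170, 802]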
def prime_sieve_eratosthenes ( num ):
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
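    # For example, prime_sieve_eratosthenes(30) returns
    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].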
import numpy
# List of input, output pairs
# List of input, output pairs
train_data = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
test_data = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_09
def _error ( example_no , data_set="train" ):
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value ( data_input_tuple ):
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output ( example_no , data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value ( example_no , data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative ( index , end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative ( index ):
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent ():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )
def test_gradient_descent ():
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
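    # Worked check of the hypothesis on the first training example with the
    # *initial* parameter_vector [2, 4, 1, 5]:
    #   h((5, 2, 3)) = 2 + 4*5 + 1*2 + 5*3 = 39, against a true output of 15;
    # gradient descent then shrinks that gap iteration by iteration.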
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any ( num , base ):
    if isinstance(num , float ):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(base , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if base in (0, 1):
        raise ValueError('base must be >= 2' )
    if base > 36:
        raise ValueError('base must be <= 36' )
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
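    # Spot checks: 255 is "FF" in base 16 and "11111111" in base 2.
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(255, 2) == "11111111"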
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
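# With the lazy-module wiring above, `import transformers.models.mt5` stays
# cheap: each heavy submodule is imported only on first attribute access. A
# consumer-side sketch (class names as exported in _import_structure above):
#
#     from transformers.models.mt5 import MT5Config, MT5ForConditionalGeneration
#     config = MT5Config()                          # triggers configuration_mt5
#     model = MT5ForConditionalGeneration(config)   # triggers modeling_mt5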
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
A: Optional[int] = False
A: List[Any] = True
A: List[Any] = False
if __name__ == "__main__":
A: Tuple = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
A: Union[str, Any] = parser.parse_args()
A: Dict = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
A: Union[str, Any] = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
A: int = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
A: List[Any] = reader.read()
A: Optional[Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
A: Optional[Any] = UNetaDModel(**config)
else:
A: Any = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
A: List[Any] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
A: Dict = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
A: List[Any] = config[key]
del config[key]
A: Dict = [k.replace("UNetRes", "") for k in config["down_block_types"]]
A: int = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
A: Optional[int] = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
A: str = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
A: List[str] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
A: Tuple = param_value
A: str = True
if not has_changed:
A: Any = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=2 , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : str = image_size
UpperCAmelCase : int = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : List[str] = use_labels
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Tuple = num_attention_heads
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase : str = (image_size // patch_size) ** 2
UpperCAmelCase : Dict = num_patches + 2
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : Any = TFDeiTModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[str] = TFDeiTForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase : List[str] = 1
UpperCAmelCase : List[str] = TFDeiTForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : Dict = TFDeiTForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase : Dict = 1
UpperCAmelCase : Tuple = TFDeiTForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : int = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : int = False
__lowerCAmelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFDeiTModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[str] = [*signature.parameters.keys()]
UpperCAmelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[str] = TFDeiTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _snake_case ( ):
UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
UpperCAmelCase : Dict = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
import math
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main ():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(f'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
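    # Worked example for the scores above: the leaves [90, 23, 6, 33, 21, 65,
    # 123, 34423] collapse to [90, 33, 65, 34423] at the max level, then to
    # [33, 65] at the min level, so the maximizing root picks 65.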
'''simple docstring'''
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
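# A minimal sketch of the replacement import the warning points to; the
# decorated function name and starting batch size are illustrative only:
#
#     from accelerate import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # retried with a smaller batch_size whenever it hits an OOM error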
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_distilbert_fast"""] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_distilbert"""] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_distilbert"""] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_distilbert"""] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format ( self, seed, shape ):
        return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
    def tearDown ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents ( self, seed=0, shape=(4, 4, 64, 64), fpaa=False ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape ) ), dtype=dtype )
        return image
    def get_unet_model ( self, fpaa=False, model_id="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = """bf16""" if fpaa else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id, subfolder="""unet""", dtype=dtype, revision=revision )
        return model, params
    def get_encoder_hidden_states ( self, seed=0, shape=(4, 77, 768), fpaa=False ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape ) ), dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4 ( self, seed, timestep, expected_slice ):
        model, params = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=True )
        latents = self.get_latents(seed, fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fpaa=True )
        sample = model.apply(
            {"""params""": params}, latents, jnp.array(timestep, dtype=jnp.int32 ), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2 ( self, seed, timestep, expected_slice ):
        model, params = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=True )
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fpaa=True )
        sample = model.apply(
            {"""params""": params}, latents, jnp.array(timestep, dtype=jnp.int32 ), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2 )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """Arguments pertaining to the encoder/decoder checkpoints and the output directory."""

    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use the pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use the pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
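# Illustrative invocation (a sketch only -- the script name, output path and
# checkpoints below are examples, not taken from this file):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2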
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALL_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camelcased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
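# Illustrative checks (values worked out by hand):
#   find_negative_index([4, 3, 2, -1]) == 3   # first negative sits at index 3
#   find_negative_index([1, 0, -1, -2]) == 2
#   find_negative_index([5, 4, 3]) == 3       # no negatives -> length of the array
#   find_negative_index([]) == 0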
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the number of negative numbers in a sorted grid using binary search."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the negative numbers by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking once the first negative in a row is found."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
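# All three strategies agree; e.g. on the first test grid
# [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
# each of them returns 8 (verified by hand: 1 + 1 + 2 + 4 negatives).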
def benchmark() -> None:
    """Benchmark the three counting functions against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"""{func}(grid=grid)""", setup=setup, number=500)
        print(f"""{func}() took {time:0.4f} seconds""")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
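# The cipher is an involution: atbash("ABCDEFGH") == "ZYXWVUTS" and
# atbash("ZYXWVUTS") == "ABCDEFGH"; characters outside the alphabet pass through unchanged.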
def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"""> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds""")
    print(f"""> atbash(): {timeit('atbash(printable)', setup=setup)} seconds""")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """`Enum` holding the different verification modes."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
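# Minimal usage sketch (toy values; the nested dicts mirror what
# `get_size_checksum_dict` above returns):
#
#   expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "ab" * 32}}
#   recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "ab" * 32}}
#   verify_checksums(expected, recorded)  # logs "All the checksums matched successfully"
#   verify_checksums(expected, {})        # raises ExpectedMoreDownloadedFiles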
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
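# Worked example: solution(10) == 10. The Fibonacci terms generated are
# 0, 1, 1, 2, 3, 5, 8, 13 and the even terms not exceeding 10 sum to 0 + 2 + 8 = 10.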
if __name__ == "__main__":
print(F'''{solution() = }''')
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Patched module that keeps a reference to the original module it replaces."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
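# Minimal usage sketch (the module and mock below are hypothetical; `patch_submodule`
# itself is the class defined above and works as a context manager):
#
#   import my_module  # hypothetical module that does `import os` internally
#
#   def mock_join(*args):
#       return "\\".join(args)
#
#   with patch_submodule(my_module, "os.path.join", mock_join):
#       ...  # code inside my_module now sees the patched os.path.join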
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__a = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCamelCase ( a_ ):
_lowerCamelCase :str = ["pixel_values"]
def __init__( self : List[Any] , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 2_55 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : List[Any] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Dict = size if size is not None else {"""shortest_edge""": 2_24}
lowerCAmelCase__ : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
lowerCAmelCase__ : List[str] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowerCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" )
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Any = size
lowerCAmelCase__ : Any = resample
lowerCAmelCase__ : int = do_center_crop
lowerCAmelCase__ : Optional[int] = crop_size
lowerCAmelCase__ : Optional[Any] = do_rescale
lowerCAmelCase__ : List[str] = rescale_factor
lowerCAmelCase__ : Optional[Any] = do_normalize
lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ : Optional[Any] = do_convert_rgb
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : int , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase__ : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase__ : Union[str, Any] = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase )
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : str , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase__ : int = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : int , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : str , ) -> Any:
"""simple docstring"""
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Any , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : Dict = size if size is not None else self.size
lowerCAmelCase__ : int = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase )
lowerCAmelCase__ : str = resample if resample is not None else self.resample
lowerCAmelCase__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Any = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase )
lowerCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : Any = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : Any = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ : Union[str, Any] = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ : List[str] = [convert_to_rgb(UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ : Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
lowerCAmelCase__ : Dict = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Dict = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ : int = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ : Optional[int] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
lowerCAmelCase__ : List[str] = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
lowerCAmelCase__ : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
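# For instance, on the docstring fragment
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)",
# `_re_checkpoint.findall` returns [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")],
# and the loop above accepts it because the reconstructed link matches the one in the docstring.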
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Applies global renaming to locally converted weights and assigns them to the new checkpoint."""
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Takes an LDM state dict and a config, and returns a converted diffusers checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
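# e.g. get_pairs(("l", "o", "w", "e", "r")) -> {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}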
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase = TextStreamer(A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase = cs.out[:-1]
self.assertEqual(A_ , A_ )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase = TextIteratorStreamer(A_ )
lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase = Thread(target=model.generate , kwargs=A_ )
thread.start()
lowerCAmelCase = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_ , A_ )
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase = TextStreamer(A_ , skip_prompt=A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase = cs.out[:-1]
self.assertEqual(A_ , A_ )
def __snake_case ( self ) -> int:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase = AutoTokenizer.from_pretrained("""distilgpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = torch.ones((1, 5) , device=A_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase = TextStreamer(A_ , skip_special_tokens=A_ )
model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase = tokenizer(A_ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = TextIteratorStreamer(A_ , timeout=0.0_0_1 )
lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase = Thread(target=model.generate , kwargs=A_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_ ):
lowerCAmelCase = """"""
for new_text in streamer:
streamer_text += new_text
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    """Check for a palindrome by pushing the second half of the list onto a stack."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison (`cur = head` restores the walker that the original draft omitted)
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    """Check for a palindrome by recording the positions at which each value occurs."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
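# A minimal usage sketch (not part of the original file). The checks above assume a
# node type exposing `val` and `next`; `ListNode` below is such a stand-in.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_linked_list(values):
    """Build a singly linked list from a Python list and return its head."""
    head = None
    for val in reversed(values):
        node = ListNode(val)
        node.next = head
        head = node
    return head


if __name__ == "__main__":
    assert is_palindrome_stack(build_linked_list([1, 2, 2, 1]))
    assert is_palindrome_dict(build_linked_list([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build_linked_list([1, 2, 3]))
    # `is_palindrome` rewires `next` pointers, so give it a fresh list
    assert is_palindrome(build_linked_list([1, 2, 1]))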
| 646
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
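    # Illustrative sketch (not in the original file): constructing the config and
    # reading attributes resolved through `attribute_map` and the properties above.
    #
    # config = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
    # assert config.hidden_size == 256        # via the `hidden_size` property
    # assert config.num_attention_heads == 8  # via `attribute_map`
    # serialized = config.to_dict()           # nested backbone config is serialized too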
| 646
| 1
|
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint into a PyTorch model and save it."""
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
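# Example invocation (a sketch; the checkpoint and config paths are placeholders, and
# the script file name is an assumption -- adjust it to wherever this script lives):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_trivia_qa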
| 720
|
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfUtilsLoggingTest(unittest.TestCase):
    def test_set_level(self) -> None:
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self) -> None:
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self) -> None:
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self) -> None:
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self) -> None:
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled() -> None:
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
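# Illustrative sketch (not part of the test file): the same public API as exercised
# above, used the way a library consumer would.
#
# from transformers.utils import logging
#
# logging.set_verbosity_info()            # or export TRANSFORMERS_VERBOSITY=info
# logger = logging.get_logger("transformers")
# logger.info("visible at info level")
# logging.disable_progress_bar()          # silence tqdm progress bars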
| 158
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# The class name below follows the diffusers dummy-object pattern for these backends;
# the metaclass must be the `DummyObject` imported above.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
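# Illustrative sketch (not in the original file): using the dummy class without the
# required backends installed fails fast via `requires_backends`.
#
# try:
#     SpectrogramDiffusionPipeline()
# except ImportError as err:
#     print(err)  # lists `transformers`, `torch`, and `note_seq` as missing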
| 7
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
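    # Illustrative sketch (not in the original file): the loop exercised above mirrors
    # real inference; `unet` is a placeholder for an actual denoising model.
    #
    # scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
    # scheduler.set_timesteps(num_inference_steps=25)
    # sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
    # for t in scheduler.timesteps:
    #     model_input = scheduler.scale_model_input(sample, t)
    #     noise_pred = unet(model_input, t).sample
    #     sample = scheduler.step(noise_pred, t, sample).prev_sample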
| 7
| 1
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    """Yield successive `size`-sized tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    """Prepare the plaintext by uppercasing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    # pad on the cleaned text: X's may have been inserted above, so test `clean`
    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext, key):
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext, key):
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
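# A short round-trip check (not part of the original file); the key and message are
# arbitrary examples.
if __name__ == "__main__":
    key = "MONARCHY"
    message = "HIDETHEGOLDINTHETREESTUMP"
    encrypted = encode(message, key)
    decrypted = decode(encrypted, key)
    # `prepare_input` may insert X's and pad to even length, so compare against the
    # prepared plaintext rather than the raw message
    assert decrypted == prepare_input(message)
    print(f"{message} -> {encrypted} -> {decrypted}")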
| 702
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
# Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
# Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
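# Typical invocation (a sketch; the script and dataset paths are placeholders):
#
#   accelerate launch cv_example.py --data_dir /path/to/pets/images --mixed_precision fp16
#
# `accelerate launch` applies the distributed setup created by `accelerate config`;
# running the file directly with `python` also works for single-process training.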
| 290
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 615
|
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
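    # Illustrative sketch (not in the original file): typical tokenizer use outside
    # the test suite; `DebertaV2Tokenizer` is the public class exercised above.
    #
    # from transformers import DebertaV2Tokenizer
    # tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    # encoded = tokenizer("I was born in 92000, and this is falsé.", return_tensors="pt")
    # print(encoded.input_ids.shape)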
| 615
| 1
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )

        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 708
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute the first `precision` digits of pi with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
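# Illustrative sanity check (added for exposition, not part of the original script):
# for modest precision, the Chudnovsky digits should agree with math.pi's leading digits.
import math

assert pi(10) == str(math.pi)[:10]  # both give "3.14159265"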
| 253
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
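# Illustrative sketch (added for exposition): the JIT test above compares traced and
# eager outputs of the same callable; the pattern works for any pure function.
import jax
import jax.numpy as jnp

@jax.jit
def scale(x):
    return 2.0 * x

with jax.disable_jit():
    eager = scale(jnp.ones((2, 3)))   # runs eagerly
jitted = scale(jnp.ones((2, 3)))      # runs the compiled version
assert eager.shape == jitted.shape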
| 467
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
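# Illustrative sketch (added for exposition): the residual blend in forward() reduces to
# out = (e0 - x) * mix + (e1 - x) * (1 - mix) + x, shown here on plain tensors.
import torch

x = torch.zeros(1, 4)
e0 = torch.full((1, 4), 2.0)
e1 = torch.full((1, 4), 4.0)
mix = 0.5
out = (e0 - x) * mix + (e1 - x) * (1 - mix) + x
assert torch.allclose(out, torch.full((1, 4), 3.0))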
| 467
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
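# Illustrative note (added for exposition): with the _LazyModule pattern above, a heavy
# submodule is only imported on first attribute access, e.g.
#   from transformers.models.falcon import FalconConfig
# triggers the import of configuration_falcon but not of modeling_falcon.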
| 6
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')

    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2_048)
    print('Key files generation successful')


if __name__ == "__main__":
    main()
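# Illustrative sketch (added for exposition, not part of the original script): a toy
# textbook-ElGamal encrypt/decrypt round trip with small hard-coded numbers. Note the
# script above stores the modular inverse of g^d as its public component, a variant;
# the underlying identity m == c2 * c1^(p-1-d) mod p is the same either way.
p, g, d = 467, 2, 127                       # prime, generator, private key
e_2 = pow(g, d, p)                          # public component
m, k = 100, 213                             # message and ephemeral key
c_1, c_2 = pow(g, k, p), (m * pow(e_2, k, p)) % p
recovered = (c_2 * pow(c_1, p - 1 - d, p)) % p
assert recovered == m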
| 6
| 1
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase__ = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(F'Saving model to {output_model_file}')
                torch.save(state_dict, output_model_file)
                logger.info(F'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(F'Saving model to {output_model_file}')
            torch.save(state_dict, output_model_file)
            logger.info(F'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, F'{MODEL_NAME}_{model_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(F'Saving model to {ckpt_dir}')
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(F'Model saved to {ckpt_dir}')
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object' )
                return
            weights_name = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(F'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(F'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(F'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(F'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, F'{MODEL_NAME}_{model_index}')
                if F'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(F'Loading model from {ckpt_dir}')
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict["model"]
            logger.info(F'Model loaded from {ckpt_dir}')
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(F'Saving Optimizer state to {output_optimizer_file}')
                torch.save(optim_state, output_optimizer_file)
                logger.info(F'Optimizer state saved in {output_optimizer_file}')
        else:
            ckpt_dir = os.path.join(output_dir, F'{OPTIMIZER_NAME}_{optimizer_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(F'Saving Optimizer state to {ckpt_dir}')
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(F'Optimizer state saved in {ckpt_dir}')
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(F'Loading Optimizer state from {input_optimizer_file}')
            optim_state = torch.load(input_optimizer_file)
            logger.info(F'Optimizer state loaded from {input_optimizer_file}')
        else:
            ckpt_dir = (
                os.path.join(input_dir, F'{OPTIMIZER_NAME}_{optimizer_index}')
                if F'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(F'Loading Optimizer from {ckpt_dir}')
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key='optimizer', storage_reader=dist_cp.FileSystemReader(ckpt_dir), )
            optim_state = optim_state['optimizer']
            logger.info(F'Optimizer loaded from {ckpt_dir}')
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
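# Illustrative sketch (added for exposition): how the FULL and LOCAL state-dict branches
# above name their checkpoint files. Assumes MODEL_NAME == "pytorch_model", which is
# accelerate's constant; the helper names below are hypothetical.
def full_state_dict_name(model_index: int) -> str:
    return "pytorch_model.bin" if model_index == 0 else f"pytorch_model_{model_index}.bin"

def local_state_dict_name(model_index: int, rank: int) -> str:
    base = "pytorch_model" if model_index == 0 else f"pytorch_model_{model_index}"
    return f"{base}_rank{rank}.bin"

assert full_state_dict_name(0) == "pytorch_model.bin"
assert local_state_dict_name(1, 3) == "pytorch_model_1_rank3.bin"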
| 332
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 1_0


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"""Iterative search: {target} found at positions: {result1}""")
        print(f"""Recursive search: {target} found at positions: {result2}""")
    else:
        print("""Not found""")
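# Illustrative check (added for exposition): once the interval shrinks below `precision`
# the searches fall back to lin_search, and the result should agree with bisect.
from bisect import bisect_left

data = list(range(0, 100, 3))
idx = ite_ternary_search(data, 51)
assert data[idx] == 51 and idx == bisect_left(data, 51)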
| 409
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """simple docstring"""
    vae_state_dict = checkpoint

    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }

    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias" )

        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    conv_attn_to_linear(new_checkpoint )

    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str, ) -> Any:
    """simple docstring"""
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )

    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device )["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config )

    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted model.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
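# Illustrative sketch (added for exposition): the renew_*/assign_to_checkpoint helpers
# above ultimately perform string rewrites from LDM VAE key names to diffusers names,
# following the meta_path {"old": ..., "new": ...} pairs, e.g.:
old_key = "encoder.down.0.block.1.norm1.weight"
new_key = old_key.replace("down.0.block", "down_blocks.0.resnets")
assert new_key == "encoder.down_blocks.0.resnets.1.norm1.weight"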
| 253
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file, 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs )

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text, add_special_tokens=False )
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False )
        return text, ids

    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        """simple docstring"""
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        """simple docstring"""
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens, expected_token )

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids, expected_ids )

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids, expected_ids )

    def test_token_bagging(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        ids = tokenizer.encode(input_text )
        output_text = tokenizer.decode(ids )
        self.assertEqual(output_text, expected_text )
    @slow
    def test_prefix_input_token_ids(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        ids_1 = tokenizer.encode(prefix_text + input_text )
        ids_2 = tokenizer.encode('', prefix_text=prefix_text + input_text )
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text )
        decoded_1 = tokenizer.decode(ids_1 )
        decoded_2 = tokenizer.decode(ids_2 )
        decoded_3 = tokenizer.decode(ids_3 )
        self.assertEqual(decoded_1, expected_text )
        self.assertEqual(decoded_2, expected_text )
        self.assertEqual(decoded_3, expected_text )

    @slow
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'

        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_2 = tokenizer('', prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1 )
        self.assertListEqual(type_ids_2, expected_mask_2 )
        self.assertListEqual(type_ids_3, expected_mask_3 )
    @slow
    def test_prefix_tokens(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        x_token_1 = tokenizer.encode('あンいワ' )
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ' )
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン' )

        self.assertEqual(tokenizer.decode(x_token_1 ), tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ), tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1, x_token_2 )
        self.assertNotEqual(x_token_1, x_token_3 )
        self.assertEqual(x_token_2[1], x_token_2[-1] )  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3] )  # SEG token

    @slow
    def test_batch_encode(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True )

        # fmt: off
        expected_outputs = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs )
        self.assertListEqual(x_token.token_type_ids, expected_typeids )
        self.assertListEqual(x_token.attention_mask, expected_attmask )
        self.assertListEqual(x_token_2.input_ids, expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids )
        self.assertListEqual(x_token_2.attention_mask, expected_attmask )

    def test_conversion_reversible(self):
        """simple docstring"""
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        """simple docstring"""
        # tokenizer has no padding token
        pass
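# Illustrative sketch (added for exposition): the expected token_type_ids above mark
# prefix positions with 1; for len_prefix=2 and len_text=3 the three layouts are:
len_prefix, len_text = 2, 3
no_prefix = [1] + [0] * (len_prefix + len_text + 1)
all_prefix = [1] * (len_prefix + len_text + 1) + [0]
split = [1] + [1] * len_prefix + [0] * (len_text + 1)
assert (no_prefix, all_prefix, split) == (
    [1, 0, 0, 0, 0, 0, 0],
    [1, 1, 1, 1, 1, 1, 0],
    [1, 1, 1, 0, 0, 0, 0],
)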
| 83
|
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today" ).json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random" ).json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
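# Illustrative variant (added for exposition): a defensive version of the calls above.
# requests.get's standard `timeout` parameter and raise_for_status() avoid hanging on a
# slow or failing API; the function name is hypothetical.
def random_quotes_safe() -> list:
    response = requests.get(API_ENDPOINT_URL + "/random", timeout=10)
    response.raise_for_status()
    return response.json()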
| 600
| 0
|
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f'{key}={val}')

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f'{cmd.pop(0)} '
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f' --output_dir {output_dir}'

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f'log.{prefix}.stdout.txt', "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f'log.{prefix}.stderr.txt', "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f'{output_dir}/all_results.json', "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f'{id}: {variation:<{longest_variation_len}}'
    outcome = f'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'\33[2K\r{outcome}'
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f'{outcome} {mean_target}'
        if len(results) > 1:
            results_str += f' {tuple(round(x, 2) for x in results)}'
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'", )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt')
    print(f'and this script\'s output is also piped into {report_fn}')

    sys.stdout = Tee(report_fn)

    print(f'\n*** Running {len(variations)} benchmarks:')
    print(f'Base command: {" ".join(base_cmd)}')

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ) )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
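# Illustrative sketch (added for exposition): how the '|'-separated dimensions described
# in the header comments expand into a cartesian product of command-line variations.
import itertools
import re

dims = [list(map(str.strip, re.split(r"\|", x))) for x in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
assert len(variations) == 6 and variations[0] == "--tf32 0"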
| 482
|
"""simple docstring"""
from collections import deque
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int ) -> None:
"""simple docstring"""
A_ = process_name # process name
A_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A_ = arrival_time
A_ = burst_time # remaining burst time
A_ = 0 # total time of the process wait in ready queue
A_ = 0 # time from arrival time to completion time
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : str , _snake_case : int , _snake_case : list[int] , _snake_case : deque[Process] , _snake_case : int , ) -> None:
"""simple docstring"""
# total number of mlfq's queues
A_ = number_of_queues
# time slice of queues that round robin algorithm applied
A_ = time_slices
# unfinished process is in this ready_queue
A_ = queue
# current time
A_ = current_time
# finished process is in this sequence queue
A_ = deque()
def lowerCamelCase__ ( self : List[str] ) -> list[str]:
"""simple docstring"""
A_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowerCamelCase__ ( self : str , _snake_case : list[Process] ) -> list[int]:
"""simple docstring"""
A_ = []
for i in range(len(_snake_case ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowerCamelCase__ ( self : Tuple , _snake_case : list[Process] ) -> list[int]:
"""simple docstring"""
A_ = []
for i in range(len(_snake_case ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowerCamelCase__ ( self : Tuple , _snake_case : list[Process] ) -> list[int]:
"""simple docstring"""
A_ = []
for i in range(len(_snake_case ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowerCamelCase__ ( self : Optional[int] , _snake_case : deque[Process] ) -> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def lowerCamelCase__ ( self : int , _snake_case : Process ) -> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCamelCase__ ( self : Dict , _snake_case : deque[Process] ) -> deque[Process]:
"""simple docstring"""
A_ = deque() # sequence deque of finished process
while len(_snake_case ) != 0:
A_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_snake_case )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A_ = 0
# set the process's turnaround time because it is finished
A_ = self.current_time - cp.arrival_time
# set the completion time
A_ = self.current_time
# add the process to queue that has finished queue
finished.append(_snake_case )
self.finish_queue.extend(_snake_case ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCamelCase__ ( self : List[str] , _snake_case : deque[Process] , _snake_case : int ) -> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
A_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_snake_case ) ):
A_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_snake_case )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A_ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_snake_case )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A_ = 0
# set the finish time
A_ = self.current_time
# update the process' turnaround time because it is finished
A_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_snake_case )
self.finish_queue.extend(_snake_case ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCamelCase__ ( self : Any ) -> deque[Process]:
"""simple docstring"""
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
A_ , A_ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # the doctest run may mutate the processes, so rebuild everything
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
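    # Worked trace of the configuration above (round-robin slices 17 and 25,
    # then FCFS): queue 1 finishes P2 at t=34; queue 2 finishes P4 at t=125;
    # the FCFS queue drains P1 (t=136) and P3 (t=162). The finish sequence is
    # therefore ['P2', 'P4', 'P1', 'P3'], with waiting times [83, 17, 94, 101]
    # for P1-P4.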
| 482
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self) -> Any:
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary of Value features."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self) -> None:
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self) -> Any:
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into a dictionary of Sequence features."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
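# Usage sketch of encode_example above: multiple translations for a single
# language are split out, then sorted into parallel per-language lists.
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   -> {"language": ("en", "fr", "fr"),
#       "translation": ("the cat", "la chatte", "le chat")}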
| 90
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 67
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=snake_case , )
assert hasattr(self , "env" )
def snake_case__ ( self , snake_case=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
TrainingJobAnalytics(snake_case ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCamelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case )
| 185
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCamelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def UpperCamelCase_( _A :Dict , _A :Union[str, Any] , _A :Optional[int]=8 )-> Dict:
UpperCamelCase__ = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
UpperCamelCase__ = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
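# Worked example for get_new_h_w above (default scale_factor=8): it returns the
# latent-space dimensions ceil(size / 64) * 8, rounding partial blocks up:
#   get_new_h_w(512, 512) -> (64, 64)
#   get_new_h_w(700, 700) -> (88, 88)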
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , movq=snake_case , )
UpperCamelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
if latents is None:
UpperCamelCase__ = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
UpperCamelCase__ = latents.to(snake_case )
UpperCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case=None , ):
'''simple docstring'''
UpperCamelCase__ = len(snake_case ) if isinstance(snake_case , snake_case ) else 1
# get prompt text embeddings
UpperCamelCase__ = self.tokenizer(
snake_case , padding="max_length" , truncation=snake_case , max_length=77 , return_attention_mask=snake_case , add_special_tokens=snake_case , return_tensors="pt" , )
UpperCamelCase__ = text_inputs.input_ids
UpperCamelCase__ = self.tokenizer(snake_case , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(snake_case , snake_case ):
UpperCamelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase__ = text_input_ids.to(snake_case )
UpperCamelCase__ = text_inputs.attention_mask.to(snake_case )
UpperCamelCase__, UpperCamelCase__ = self.text_encoder(
input_ids=snake_case , attention_mask=snake_case )
UpperCamelCase__ = prompt_embeds.repeat_interleave(snake_case , dim=0 )
UpperCamelCase__ = text_encoder_hidden_states.repeat_interleave(snake_case , dim=0 )
UpperCamelCase__ = text_mask.repeat_interleave(snake_case , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ = 42
if negative_prompt is None:
UpperCamelCase__ = [""] * batch_size
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !='''
F''' {type(snake_case )}.''' )
elif isinstance(snake_case , snake_case ):
UpperCamelCase__ = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
UpperCamelCase__ = negative_prompt
UpperCamelCase__ = self.tokenizer(
snake_case , padding="max_length" , max_length=77 , truncation=snake_case , return_attention_mask=snake_case , add_special_tokens=snake_case , return_tensors="pt" , )
UpperCamelCase__ = uncond_input.input_ids.to(snake_case )
UpperCamelCase__ = uncond_input.attention_mask.to(snake_case )
UpperCamelCase__, UpperCamelCase__ = self.text_encoder(
input_ids=snake_case , attention_mask=snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ = negative_prompt_embeds.shape[1]
UpperCamelCase__ = negative_prompt_embeds.repeat(1 , snake_case )
UpperCamelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case )
UpperCamelCase__ = uncond_text_encoder_hidden_states.shape[1]
UpperCamelCase__ = uncond_text_encoder_hidden_states.repeat(1 , snake_case , 1 )
UpperCamelCase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , snake_case , -1 )
UpperCamelCase__ = uncond_text_mask.repeat_interleave(snake_case , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCamelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCamelCase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def snake_case__ ( self , snake_case=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase__ = torch.device(F'''cuda:{gpu_id}''' )
UpperCamelCase__ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case , snake_case )
def snake_case__ ( self , snake_case=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCamelCase__ = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase__ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCamelCase__, UpperCamelCase__ = cpu_offload_with_hook(snake_case , snake_case , prev_module_hook=snake_case )
if self.safety_checker is not None:
UpperCamelCase__, UpperCamelCase__ = cpu_offload_with_hook(self.safety_checker , snake_case , prev_module_hook=snake_case )
# We'll offload the last model manually.
UpperCamelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case__ ( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case )
def __call__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = 512 , snake_case = 512 , snake_case = 100 , snake_case = 4.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
UpperCamelCase__ = 1
elif isinstance(snake_case , snake_case ):
UpperCamelCase__ = len(snake_case )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(snake_case )}''' )
UpperCamelCase__ = self._execution_device
UpperCamelCase__ = batch_size * num_images_per_prompt
UpperCamelCase__ = guidance_scale > 1.0
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = self._encode_prompt(
snake_case , snake_case , snake_case , snake_case , snake_case )
if isinstance(snake_case , snake_case ):
UpperCamelCase__ = torch.cat(snake_case , dim=0 )
if isinstance(snake_case , snake_case ):
UpperCamelCase__ = torch.cat(snake_case , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ = image_embeds.repeat_interleave(snake_case , dim=0 )
UpperCamelCase__ = negative_image_embeds.repeat_interleave(snake_case , dim=0 )
UpperCamelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=snake_case )
self.scheduler.set_timesteps(snake_case , device=snake_case )
UpperCamelCase__ = self.scheduler.timesteps
UpperCamelCase__ = self.unet.config.in_channels
UpperCamelCase__, UpperCamelCase__ = get_new_h_w(snake_case , snake_case , self.movq_scale_factor )
# create initial latent
UpperCamelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , snake_case , snake_case , snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
UpperCamelCase__ = self.unet(
sample=snake_case , timestep=snake_case , encoder_hidden_states=snake_case , added_cond_kwargs=snake_case , return_dict=snake_case , )[0]
if do_classifier_free_guidance:
UpperCamelCase__, UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase__, UpperCamelCase__ = noise_pred.chunk(2 )
UpperCamelCase__, UpperCamelCase__ = variance_pred.chunk(2 )
UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase__, UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(
snake_case , snake_case , snake_case , generator=snake_case , ).prev_sample
# post-processing
UpperCamelCase__ = self.movq.decode(snake_case , force_not_quantize=snake_case )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
UpperCamelCase__ = image * 0.5 + 0.5
UpperCamelCase__ = image.clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case )
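# Note on the guidance step in the denoising loop above: the combined
# prediction noise_pred_uncond + guidance_scale * (noise_pred_text -
# noise_pred_uncond) pushes the unconditional estimate toward the
# text-conditioned one; a guidance_scale of 1.0 leaves it unchanged, which is
# why do_classifier_free_guidance requires guidance_scale > 1.0.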
| 185
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """An XLNet sequence has the format ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence, 1 for the second, 2 for ``<cls>``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
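# Illustration with made-up token ids (assume sep_token_id=4, cls_token_id=3):
# XLNet appends its special tokens at the end of the sequence, unlike BERT's
# leading [CLS].
#   build_inputs_with_special_tokens([10, 11])       -> [10, 11, 4, 3]
#   build_inputs_with_special_tokens([10, 11], [12]) -> [10, 11, 4, 12, 4, 3]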
| 636
|
"""simple docstring"""
import baseaa
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8" ) )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return baseaa.baadecode(_UpperCamelCase ).decode("utf-8" )
if __name__ == "__main__":
A : List[str] = "Hello World!"
A : Any = baseaa_encode(test)
print(encoded)
A : List[Any] = baseaa_decode(encoded)
print(decoded)
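    # Note: Base85 packs every 4 input bytes into 5 ASCII characters, so the
    # 12-byte "Hello World!" above encodes to a 15-character string.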
| 636
| 1
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __magic_name__ ( unittest.TestCase):
def _UpperCAmelCase ( self : List[Any] ):
UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
UpperCAmelCase = get_aligned_output_features_output_indices(a_ ,a_ ,a_ )
self.assertEqual(a_ ,["c"] )
self.assertEqual(a_ ,[2] )
# Out indices set to match out features
UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] ,a_ ,a_ )
self.assertEqual(a_ ,["a", "c"] )
self.assertEqual(a_ ,[0, 2] )
# Out features set to match out indices
UpperCAmelCase = get_aligned_output_features_output_indices(a_ ,[0, 2] ,a_ )
self.assertEqual(a_ ,["a", "c"] )
self.assertEqual(a_ ,[0, 2] )
# Out features selected from negative indices
UpperCAmelCase = get_aligned_output_features_output_indices(a_ ,[-3, -1] ,a_ )
self.assertEqual(a_ ,["a", "c"] )
self.assertEqual(a_ ,[-3, -1] )
def _UpperCAmelCase ( self : Union[str, Any] ):
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] ,(0, 1) ,a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") ,(0, 1) ,["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] ,(0, 1) ,["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ ,0 ,["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ ,(0, 1) ,["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] ,(0,) ,["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] ,(0, 2) ,["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] ,(0, 1) ,["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] ,(0, 1, -1) ,["a", "b", "c", "d"] )
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = BackboneMixin()
UpperCAmelCase = ["a", "b", "c"]
UpperCAmelCase = ["a", "c"]
UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features ,["a", "c"] )
self.assertEqual(backbone.out_indices ,[0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features ,["a", "b"] )
self.assertEqual(backbone.out_indices ,[0, 1] )
UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features ,["a", "c"] )
self.assertEqual(backbone.out_indices ,[-3, -1] )
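# Note on the negative-index case above: in a three-stage ["a", "b", "c"]
# backbone, out_indices [-3, -1] address the same stages as [0, 2], so both
# spellings are accepted and preserved exactly as given.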
| 707
|
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(10) = }")
| 405
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "sentencepiece.bpe.model"}
__A = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__A = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__A = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : str="<mask>" , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=False , **UpperCamelCase__ : int , )-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else mask_token
__lowerCAmelCase: str = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCAmelCase: Tuple = legacy_behaviour
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase: str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCamelCase__))
__lowerCAmelCase: Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowerCAmelCase: List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCAmelCase: Dict = 1
__lowerCAmelCase: Union[str, Any] = len(self.sp_model)
__lowerCAmelCase: Tuple = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase__)
}
__lowerCAmelCase: Dict = {v: k for k, v in self.lang_code_to_id.items()}
__lowerCAmelCase: int = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
__lowerCAmelCase: Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowerCAmelCase: Dict = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
__lowerCAmelCase: List[Any] = src_lang if src_lang is not None else "eng_Latn"
__lowerCAmelCase: Dict = self.lang_code_to_id[self._src_lang]
__lowerCAmelCase: Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Optional[Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.__dict__.copy()
__lowerCAmelCase: List[Any] = None
__lowerCAmelCase: List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , UpperCamelCase__ : List[str])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
__lowerCAmelCase: int = {}
__lowerCAmelCase: Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def lowercase_ ( self : Any)-> List[Any]:
'''simple docstring'''
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase_ ( self : Any)-> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str)-> None:
'''simple docstring'''
__lowerCAmelCase: int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def lowercase_ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False)-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__)
__lowerCAmelCase: List[str] = [1] * len(self.prefix_tokens)
__lowerCAmelCase: Optional[Any] = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase__)) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase__)) + ([0] * len(UpperCamelCase__)) + suffix_ones
def lowercase_ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = [self.sep_token_id]
__lowerCAmelCase: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowercase_ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : int)-> Dict:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
__lowerCAmelCase: List[Any] = src_lang
__lowerCAmelCase: Optional[Any] = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: List[str] = self.convert_tokens_to_ids(UpperCamelCase__)
__lowerCAmelCase: Dict = tgt_lang_id
return inputs
def lowercase_ ( self : str)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = {self.convert_ids_to_tokens(UpperCamelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowercase_ ( self : str , UpperCamelCase__ : str)-> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Tuple)-> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase: Dict = self.sp_model.PieceToId(UpperCamelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self : Any , UpperCamelCase__ : str)-> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def lowercase_ ( self : List[str] , UpperCamelCase__ : int)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: List[str] = "".join(UpperCamelCase__).replace(UpperCamelCase__ , " ").strip()
return out_string
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCAmelCase: Tuple = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase__)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase__ , "wb") as fi:
__lowerCAmelCase: List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__)
return (out_vocab_file,)
def lowercase_ ( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "eng_Latn" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "fra_Latn" , **UpperCamelCase__ : str , )-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: str = src_lang
__lowerCAmelCase: Any = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : List[str])-> Optional[int]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang)
def lowercase_ ( self : Dict)-> List[str]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[str])-> None:
'''simple docstring'''
__lowerCAmelCase: List[Any] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__lowerCAmelCase: str = []
__lowerCAmelCase: List[str] = [self.eos_token_id, self.cur_lang_code]
else:
__lowerCAmelCase: List[str] = [self.cur_lang_code]
__lowerCAmelCase: Optional[Any] = [self.eos_token_id]
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str)-> None:
'''simple docstring'''
__lowerCAmelCase: int = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__lowerCAmelCase: Optional[Any] = []
__lowerCAmelCase: Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
__lowerCAmelCase: Tuple = [self.cur_lang_code]
__lowerCAmelCase: Tuple = [self.eos_token_id]
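# Minimal sketch of the fairseq/SentencePiece id alignment documented in
# __init__ above (fairseq_offset = 1, fairseq unk id 3): a SentencePiece piece
# with spm id 3 maps to fairseq id 3 + 1 = 4, while spm id 0 (its <unk>) maps
# straight to unk_token_id rather than 0 + offset, per the PieceToId branch
# above.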
| 346
|
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with a binary-search bound that shrinks row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives, breaking at the first negative in each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations on the large matrix."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
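    # Worked trace for the first test grid above, [[4, 3, 2, -1], [3, 2, 1, -1],
    # [1, 1, -1, -2], [-1, -1, -2, -3]]: the shrinking binary-search bound is
    # 3, 3, 2, 0 per row, so count_negatives_binary_search returns
    # 4 * 4 - (3 + 3 + 2 + 0) = 8 negatives.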
| 346
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 705
|
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num!, memoising intermediate results via lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
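    # Because of @lru_cache, each recursion level is memoised: after
    # factorial(10), a later call to factorial(12) only computes the two new
    # frames (11 and 12).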
| 407
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
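    # Each Chudnovsky term contributes roughly 14 new correct digits, which is
    # why the loop runs ceil(precision / 14) times: the 50 digits requested
    # above need only 4 terms.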
| 462
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[Any] = StableUnCLIPImgaImgPipeline
_lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCAmelCase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase : str = frozenset([] )
def A( self):
__UpperCAmelCase : Any = 3_2
__UpperCAmelCase : Union[str, Any] = embedder_hidden_size
# image encoding components
__UpperCAmelCase : Union[str, Any] = CLIPImageProcessor(crop_size=3_2 , size=3_2)
torch.manual_seed(0)
__UpperCAmelCase : List[str] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowercase__ , projection_dim=lowercase__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
__UpperCAmelCase : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=lowercase__)
__UpperCAmelCase : Any = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
torch.manual_seed(0)
__UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
torch.manual_seed(0)
__UpperCAmelCase : int = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
torch.manual_seed(0)
__UpperCAmelCase : Tuple = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0)
__UpperCAmelCase : Optional[Any] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0)
__UpperCAmelCase : Optional[Any] = AutoencoderKL()
__UpperCAmelCase : Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def A( self , lowercase__ , lowercase__=0 , lowercase__=True):
if str(lowercase__).startswith('''mps'''):
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(lowercase__)
else:
__UpperCAmelCase : int = torch.Generator(device=lowercase__).manual_seed(lowercase__)
__UpperCAmelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase__)).to(lowercase__)
if pil_image:
__UpperCAmelCase : int = input_image * 0.5 + 0.5
__UpperCAmelCase : str = input_image.clamp(0 , 1)
__UpperCAmelCase : Optional[int] = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
__UpperCAmelCase : Union[str, Any] = DiffusionPipeline.numpy_to_pil(lowercase__)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A( self):
__UpperCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : List[Any] = StableUnCLIPImgaImgPipeline(**lowercase__)
__UpperCAmelCase : List[str] = sd_pipe.to(lowercase__)
sd_pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Dict = self.get_dummy_inputs(lowercase__)
inputs.update({'''image_embeds''': None})
__UpperCAmelCase : Any = sd_pipe(**lowercase__).images
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def A( self):
__UpperCAmelCase : Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__)
def A( self):
__UpperCAmelCase : str = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A( self):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase__)
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def A( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A( self):
__UpperCAmelCase : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCAmelCase : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
__UpperCAmelCase : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
# Stable unCLIP will OOM when the integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Optional[Any] = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCAmelCase : Any = pipe(lowercase__ , '''anime turtle''' , generator=lowercase__ , output_type='''np''')
__UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCAmelCase : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
__UpperCAmelCase : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
# Stable unCLIP will OOM when the integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Dict = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCAmelCase : int = pipe(lowercase__ , '''anime turtle''' , generator=lowercase__ , output_type='''np''')
__UpperCAmelCase : List[str] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
__UpperCAmelCase : Optional[int] = pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : int = pipe(
lowercase__ , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
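# A minimal sketch of the peak-VRAM measurement pattern used in the test
# above, assuming a CUDA device and some diffusers pipeline `pipe`; the
# helper name and the 7 GB threshold are illustrative, not the test suite's
# own API.
import torch

def peak_vram_bytes(run_fn):
    # Reset CUDA memory statistics, run the callable, return peak bytes allocated.
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    run_fn()
    return torch.cuda.max_memory_allocated()

# usage (hypothetical pipeline call):
# mem_bytes = peak_vram_bytes(lambda: pipe(image, "anime turtle", num_inference_steps=2, output_type="np"))
# assert mem_bytes < 7 * 10**9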
| 462
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCAmelCase__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCAmelCase__ : Union[str, Any] = 12_80_22
lowerCAmelCase__ : Any = 12_80_28
@require_sentencepiece
class __snake_case ( __lowerCAmelCase ,unittest.TestCase ):
__lowerCamelCase = MaMaaaTokenizer
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = True
def __a ( self ) -> Dict:
'''simple docstring'''
super().setUp()
snake_case__ : Optional[Any] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case__ : List[str] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
snake_case__ : Any = Path(self.tmpdirname )
save_json(lowerCamelCase__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCamelCase__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
snake_case__ : Dict = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , **__UpperCamelCase ) -> int:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def __a ( self , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[Any] = '''</s>'''
snake_case__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Optional[int] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<s>' )
self.assertEqual(len(lowerCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def __a ( self ) -> str:
'''simple docstring'''
pass
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = self.get_tokenizer()
snake_case__ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
snake_case__ : List[Any] = tokenizer.convert_tokens_to_string(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , 'This is a test' )
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
__lowerCamelCase = "facebook/m2m100_418M"
__lowerCamelCase = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
__lowerCamelCase = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
__lowerCamelCase = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
def __a ( cls ) -> Tuple:
'''simple docstring'''
snake_case__ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
snake_case__ : int = 1
return cls
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 128063 )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Any = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCamelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] , 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) , lowerCamelCase__ )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = '''en'''
snake_case__ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def __a ( self ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case__ : List[Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
snake_case__ : Optional[int] = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
snake_case__ : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Any = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCamelCase__ )
snake_case__ : List[Any] = MaMaaaTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCamelCase__ )
@require_torch
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = '''en'''
snake_case__ : str = '''fr'''
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , return_tensors='pt' )
snake_case__ : Any = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case__ : List[Any] = batch[k].tolist()
# batch = {k: v.tolist() for k, v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_input_ids[1][:2] should be [eos_token_id, FR_CODE]; see the asserts below
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case__ : int = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case__ : List[str] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# en_XX, A, test, EOS
'input_ids': [[128022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 128006,
} , )
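# A minimal sketch of the translation workflow the tests above exercise:
# encode with a source language, then force the target-language token at
# generation time via `forced_bos_token_id`. The checkpoint matches the
# tests; the input sentence is an illustrative placeholder.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))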
| 718
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class __snake_case ( _lowerCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , FutureWarning , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
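# Migration sketch for the deprecation warning above: PerceiverImageProcessor
# is the drop-in replacement, so callers only need to swap the class name.
# The checkpoint name here is an assumption for illustration.
from transformers import PerceiverImageProcessor

image_processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
# inputs = image_processor(images=pil_image, return_tensors="pt")  # same call shape as the old feature extractor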
| 699
| 0
|
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __UpperCamelCase ( __snake_case ):
lowercase_ : str = ["""input_values""", """attention_mask"""]
def __init__( self : int , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1_6000 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : bool = False , UpperCAmelCase : int = 80 , UpperCAmelCase : int = 16 , UpperCAmelCase : int = 64 , UpperCAmelCase : str = "hann_window" , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 80 , UpperCAmelCase : float = 7600 , UpperCAmelCase : float = 1e-1_0 , UpperCAmelCase : int = 2 , UpperCAmelCase : bool = True , **UpperCAmelCase : Any , ) -> str:
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase :int = do_normalize
lowerCAmelCase :Any = return_attention_mask
lowerCAmelCase :str = num_mel_bins
lowerCAmelCase :str = hop_length
lowerCAmelCase :List[Any] = win_length
lowerCAmelCase :Tuple = win_function
lowerCAmelCase :Any = frame_signal_scale
lowerCAmelCase :Any = fmin
lowerCAmelCase :Any = fmax
lowerCAmelCase :List[str] = mel_floor
lowerCAmelCase :str = reduction_factor
lowerCAmelCase :str = win_length * sampling_rate // 1000
lowerCAmelCase :Optional[int] = hop_length * sampling_rate // 1000
lowerCAmelCase :Tuple = optimal_fft_length(self.sample_size )
lowerCAmelCase :int = (self.n_fft // 2) + 1
lowerCAmelCase :Tuple = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase )
lowerCAmelCase :Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( UpperCAmelCase : List[np.ndarray] , UpperCAmelCase : List[np.ndarray] , UpperCAmelCase : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
lowerCAmelCase :List[str] = np.array(UpperCAmelCase , np.intaa )
lowerCAmelCase :Dict = []
for vector, length in zip(UpperCAmelCase , attention_mask.sum(-1 ) ):
lowerCAmelCase :Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase :List[str] = padding_value
normed_input_values.append(UpperCAmelCase )
else:
lowerCAmelCase :Dict = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , ) -> np.ndarray:
lowerCAmelCase :Tuple = spectrogram(
UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : Tuple , UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Union[str, Any] , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowerCAmelCase :Union[str, Any] = self._process_audio(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , )
else:
lowerCAmelCase :str = None
if audio_target is not None:
lowerCAmelCase :Union[str, Any] = self._process_audio(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase :int = inputs_target["input_values"]
lowerCAmelCase :Optional[int] = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase :List[str] = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase : bool = False , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : int , ) -> BatchFeature:
lowerCAmelCase :int = isinstance(UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowerCAmelCase :List[str] = is_batched_numpy or (
isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase :List[Any] = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
lowerCAmelCase :Optional[Any] = np.asarray(UpperCAmelCase , dtype=np.floataa )
elif isinstance(UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase :Optional[int] = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase :int = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase :Tuple = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase :List[str] = [self._extract_mel_features(UpperCAmelCase ) for waveform in speech]
lowerCAmelCase :int = BatchFeature({'input_values': features} )
lowerCAmelCase :str = self.num_mel_bins
else:
lowerCAmelCase :Optional[int] = BatchFeature({'input_values': speech} )
lowerCAmelCase :List[Any] = self.pad(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase :Optional[Any] = feature_size_hack
# convert input values to correct format
lowerCAmelCase :int = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
lowerCAmelCase :Any = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCAmelCase :Union[str, Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCAmelCase :Tuple = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCAmelCase :Union[str, Any] = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCAmelCase :str = [np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCAmelCase :Dict = (
attention_mask
if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase :List[Any] = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
lowerCAmelCase :Any = padded_inputs.convert_to_tensors(UpperCAmelCase )
return padded_inputs
def UpperCAmelCase__ ( self : Dict ) -> Dict[str, Any]:
lowerCAmelCase :str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCAmelCase :Optional[Any] = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
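# A numpy-only sketch of the zero-mean, unit-variance normalization performed
# by `zero_mean_unit_var_norm` above, for a single unpadded waveform; the 1e-7
# term guards against division by zero on silent inputs.
import numpy as np

def normalize_waveform(x: np.ndarray) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

waveform = np.random.randn(16000).astype(np.float32)  # one second of audio at 16 kHz
normed = normalize_waveform(waveform)
print(normed.mean(), normed.var())  # approximately 0.0 and 1.0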
| 553
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowercase ( __snake_case ):
_A = (DEISMultistepScheduler,)
_A = (("num_inference_steps", 25),)
def _a(self : Optional[int] , **snake_case : str ) -> Optional[Any]:
_lowercase : str = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**snake_case )
return config
def _a(self : Optional[int] , snake_case : Optional[int]=0 , **snake_case : List[Any] ) -> Dict:
_lowercase : Dict = dict(self.forward_default_kwargs )
_lowercase : Optional[Any] = kwargs.pop("num_inference_steps" , snake_case )
_lowercase : Optional[int] = self.dummy_sample
_lowercase : Tuple = 0.1 * sample
_lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowercase : Dict = self.get_scheduler_config(**snake_case )
_lowercase : int = scheduler_class(**snake_case )
scheduler.set_timesteps(snake_case )
# copy over dummy past residuals
_lowercase : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case )
_lowercase : Any = scheduler_class.from_pretrained(snake_case )
new_scheduler.set_timesteps(snake_case )
# copy over dummy past residuals
_lowercase : int = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowercase , _lowercase : Optional[Any] = sample, sample
for t in range(snake_case , time_step + scheduler.config.solver_order + 1 ):
_lowercase : List[Any] = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
_lowercase : Union[str, Any] = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _a(self : str ) -> List[str]:
pass
def _a(self : Optional[Any] , snake_case : List[Any]=0 , **snake_case : str ) -> Union[str, Any]:
_lowercase : List[str] = dict(self.forward_default_kwargs )
_lowercase : Optional[int] = kwargs.pop("num_inference_steps" , snake_case )
_lowercase : Dict = self.dummy_sample
_lowercase : Union[str, Any] = 0.1 * sample
_lowercase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowercase : List[Any] = self.get_scheduler_config()
_lowercase : int = scheduler_class(**snake_case )
scheduler.set_timesteps(snake_case )
# copy over dummy past residuals (must be after setting timesteps)
_lowercase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case )
_lowercase : str = scheduler_class.from_pretrained(snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case )
# copy over dummy past residual (must be after setting timesteps)
_lowercase : int = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowercase : Any = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
_lowercase : Dict = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _a(self : Dict , snake_case : str=None , **snake_case : Dict ) -> Optional[Any]:
if scheduler is None:
_lowercase : Optional[Any] = self.scheduler_classes[0]
_lowercase : Dict = self.get_scheduler_config(**snake_case )
_lowercase : Optional[int] = scheduler_class(**snake_case )
_lowercase : Tuple = self.scheduler_classes[0]
_lowercase : str = self.get_scheduler_config(**snake_case )
_lowercase : Tuple = scheduler_class(**snake_case )
_lowercase : Optional[int] = 10
_lowercase : List[str] = self.dummy_model()
_lowercase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case )
for i, t in enumerate(scheduler.timesteps ):
_lowercase : str = model(snake_case , snake_case )
_lowercase : int = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
return sample
def _a(self : Any ) -> Tuple:
_lowercase : Union[str, Any] = dict(self.forward_default_kwargs )
_lowercase : Optional[int] = kwargs.pop("num_inference_steps" , snake_case )
for scheduler_class in self.scheduler_classes:
_lowercase : List[Any] = self.get_scheduler_config()
_lowercase : Optional[Any] = scheduler_class(**snake_case )
_lowercase : Tuple = self.dummy_sample
_lowercase : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case , "set_timesteps" ):
scheduler.set_timesteps(snake_case )
elif num_inference_steps is not None and not hasattr(snake_case , "set_timesteps" ):
_lowercase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowercase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowercase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
_lowercase : Tuple = scheduler.timesteps[5]
_lowercase : Dict = scheduler.timesteps[6]
_lowercase : Tuple = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
_lowercase : Union[str, Any] = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a(self : Optional[int] ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowercase : Any = DEISMultistepScheduler(**self.get_scheduler_config() )
_lowercase : List[Any] = self.full_loop(scheduler=snake_case )
_lowercase : Tuple = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
_lowercase : str = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowercase : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_lowercase : int = DEISMultistepScheduler.from_config(scheduler.config )
_lowercase : Union[str, Any] = self.full_loop(scheduler=snake_case )
_lowercase : Dict = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def _a(self : Union[str, Any] ) -> Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=snake_case )
def _a(self : Optional[Any] ) -> List[str]:
self.check_over_configs(thresholding=snake_case )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , algorithm_type="deis" , solver_order=snake_case , solver_type=snake_case , )
def _a(self : int ) -> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def _a(self : Tuple ) -> List[Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , algorithm_type=snake_case , )
_lowercase : Any = self.full_loop(
solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , algorithm_type=snake_case , )
assert not torch.isnan(snake_case ).any(), "Samples have nan numbers"
def _a(self : int ) -> List[str]:
self.check_over_configs(lower_order_final=snake_case )
self.check_over_configs(lower_order_final=snake_case )
def _a(self : Any ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=snake_case , time_step=0 )
def _a(self : Tuple ) -> Optional[Any]:
_lowercase : Optional[Any] = self.full_loop()
_lowercase : Tuple = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def _a(self : Optional[int] ) -> Dict:
_lowercase : int = self.full_loop(prediction_type="v_prediction" )
_lowercase : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.0_91 ) < 1e-3
def _a(self : int ) -> Optional[int]:
_lowercase : int = self.scheduler_classes[0]
_lowercase : Tuple = self.get_scheduler_config(thresholding=snake_case , dynamic_thresholding_ratio=0 )
_lowercase : int = scheduler_class(**snake_case )
_lowercase : Optional[int] = 10
_lowercase : Union[str, Any] = self.dummy_model()
_lowercase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case )
for i, t in enumerate(scheduler.timesteps ):
_lowercase : Tuple = model(snake_case , snake_case )
_lowercase : List[str] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
assert sample.dtype == torch.floataa
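# The config round-trip test above relies on diffusers schedulers sharing a
# common config format: any of these multistep schedulers can be rebuilt from
# another's config via `from_config`. A minimal sketch of that round trip:
from diffusers import DEISMultistepScheduler, DPMSolverMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1000, beta_schedule="linear")
dpm = DPMSolverMultistepScheduler.from_config(deis.config)
deis_again = DEISMultistepScheduler.from_config(dpm.config)
print(type(dpm).__name__, deis_again.config.num_train_timesteps)  # DPMSolverMultistepScheduler 1000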
| 461
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCamelCase : Tuple = "docs/source/en/_toctree.yml"
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = defaultdict(UpperCamelCase__ )
A = []
A = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(UpperCamelCase__ )
A = new_doc_list
A = [key for key, value in counts.items() if value > 1]
A = []
for duplicate_key in duplicates:
A = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
A = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCamelCase__ ) > 1:
raise ValueError(f'{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(UpperCamelCase__ )
# Sort
return overview_doc
def __snake_case ( UpperCamelCase__=False ) -> Any:
"""simple docstring"""
with open(UpperCamelCase__ , encoding='utf-8' ) as f:
A = yaml.safe_load(f.read() )
# Get to the API doc
A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A = content[api_idx]['sections']
# Then to the model doc
A = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
A = api_doc[scheduler_idx]['sections']
A = clean_doc_toc(UpperCamelCase__ )
A = False
if new_scheduler_doc != scheduler_doc:
A = True
if overwrite:
A = new_scheduler_doc
if diff:
if overwrite:
A = api_doc
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def __snake_case ( UpperCamelCase__=False ) -> List[str]:
"""simple docstring"""
with open(UpperCamelCase__ , encoding='utf-8' ) as f:
A = yaml.safe_load(f.read() )
# Get to the API doc
A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A = content[api_idx]['sections']
# Then to the model doc
A = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
A = False
A = api_doc[pipeline_idx]['sections']
A = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
A = pipeline_doc['section']
A = clean_doc_toc(UpperCamelCase__ )
if overwrite:
A = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCamelCase__ )
# sort overall pipeline doc
A = clean_doc_toc(UpperCamelCase__ )
if new_pipeline_docs != pipeline_docs:
A = True
if overwrite:
A = new_pipeline_docs
if diff:
if overwrite:
A = api_doc
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCamelCase : Tuple = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
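# A standalone sketch of the duplicate-detection idea inside `clean_doc_toc`:
# count each `local` key and flag keys that appear more than once. The toy
# data below is illustrative, not taken from the real table of contents.
from collections import defaultdict

docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM Scheduler"},
]
counts = defaultdict(int)
for doc in docs:
    counts[doc["local"]] += 1
duplicates = [key for key, value in counts.items() if value > 1]
print(duplicates)  # ['ddim'] -> the real script raises here because the titles differ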
| 715
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : str , **_lowercase : Union[str, Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(_lowercase )
logger.warning(
f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' , self.tpu_name )
A = kwargs.pop('device_idx' , self.device_idx )
A = kwargs.pop('eager_mode' , self.eager_mode )
A = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**_lowercase )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Name of TPU"""} , )
lowerCAmelCase = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
lowerCAmelCase = field(default=UpperCAmelCase_ , metadata={"""help""": """Benchmark models in eager model."""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def __a ( self : Optional[Any] ):
requires_backends(self , ['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def __a ( self : Dict ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi-GPU is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
A = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def __a ( self : List[Any] ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def __a ( self : Optional[Any] ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def __a ( self : str ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def __a ( self : Any ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __a ( self : Dict ):
return self.n_gpu > 0
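# A condensed sketch of the device-selection logic above: build a
# single-device TensorFlow strategy for the first visible GPU, else fall back
# to CPU. `device_idx=0` mirrors the dataclass default; error handling for an
# already-initialized runtime is omitted.
import tensorflow as tf

def single_device_strategy(device_idx: int = 0) -> tf.distribute.OneDeviceStrategy:
    if tf.config.list_physical_devices("GPU"):
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # make sure no GPU is used
    return tf.distribute.OneDeviceStrategy(device=f"/cpu:{device_idx}")

strategy = single_device_strategy()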
| 91
| 0
|
"""simple docstring"""
from __future__ import annotations
def shear_stress( stress : float , tangential_force : float , area : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
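# Worked examples for `shear_stress` above: exactly one of the three
# quantities must be zero, and that unknown is solved from tau = F / A.
print(shear_stress(stress=25, tangential_force=100, area=0))     # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))   # ('stress', 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))  # ('tangential_force', 1200000)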
| 196
|
"""simple docstring"""
A_ : Any = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
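# A minimal usage sketch for the package surface exported above: constructing
# an `Accelerator` with defaults and inspecting the device it selected.
from accelerate import Accelerator

accelerator = Accelerator()
print(accelerator.device, accelerator.num_processes)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)  # typical next step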
| 196
| 1
|
lowerCamelCase__ = 9.80665
def archimedes_principle( fluid_density : float , volume : float , gravity : float = g ) ->float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible object volume" )
if gravity <= 0:
raise ValueError("Impossible gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
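# Worked example for `archimedes_principle` above: buoyant force
# F_b = rho * g * V, so water (1000 kg/m^3) displacing 0.5 m^3 gives
# 1000 * 9.80665 * 0.5 = 4903.325 N.
print(archimedes_principle(fluid_density=1000, volume=0.5))  # 4903.325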
| 701
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82
| 0
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
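# Quick sanity check (a minimal sketch, assuming reverse_long_words above):
# only words longer than four characters are reversed in place.
assert reverse_long_words('Hey wollef sroirraw') == 'Hey fellow warriors'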
| 135
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer) -> None:
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 135
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCamelCase : Any = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = 'albert'
    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ])
| 303
|
from math import ceil
def solution(n = 1_0_0_1):
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
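# Hand-checked example (a minimal sketch, assuming solution above): in a 5x5
# number spiral the diagonals hold 1, 3, 5, 7, 9, 13, 17, 21 and 25, summing to 101.
assert solution(5) == 101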
| 303
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
def __UpperCAmelCase ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ):
UpperCAmelCase__ : str = dict(self.forward_default_kwargs )
UpperCAmelCase__ : int = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = self.dummy_sample
UpperCAmelCase__ : List[str] = 0.1 * sample
UpperCAmelCase__ : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : List[Any] = self.get_scheduler_config(**_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
UpperCAmelCase__ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
UpperCAmelCase__ : str = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
UpperCAmelCase__ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase__ : str = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
UpperCAmelCase__ : int = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ):
UpperCAmelCase__ : int = dict(self.forward_default_kwargs )
UpperCAmelCase__ : str = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
UpperCAmelCase__ : int = self.dummy_sample
UpperCAmelCase__ : Tuple = 0.1 * sample
UpperCAmelCase__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
UpperCAmelCase__ : Any = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase__ : Dict = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
UpperCAmelCase__ : int = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = dict(self.forward_default_kwargs )
UpperCAmelCase__ : List[Any] = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : Optional[int] = self.get_scheduler_config()
UpperCAmelCase__ : Optional[int] = scheduler_class(**_lowerCAmelCase )
UpperCAmelCase__ : List[str] = self.dummy_sample
UpperCAmelCase__ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowerCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(_lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(_lowerCAmelCase , """set_timesteps""" ):
UpperCAmelCase__ : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
UpperCAmelCase__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase__ : List[Any] = scheduler.timesteps[5]
UpperCAmelCase__ : Union[str, Any] = scheduler.timesteps[6]
UpperCAmelCase__ : List[str] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
UpperCAmelCase__ : List[str] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase__ : Tuple = DEISMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase__ : Dict = self.full_loop(scheduler=_lowerCAmelCase )
UpperCAmelCase__ : Dict = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
UpperCAmelCase__ : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : str = self.full_loop(scheduler=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def __UpperCAmelCase ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def __UpperCAmelCase ( self ):
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="""deis""" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def __UpperCAmelCase ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
UpperCAmelCase__ : Any = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self ):
self.check_over_configs(lower_order_final=_lowerCAmelCase )
self.check_over_configs(lower_order_final=_lowerCAmelCase )
def __UpperCAmelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.full_loop()
UpperCAmelCase__ : Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : str = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase__ : Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Any = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 )
UpperCAmelCase__ : List[Any] = scheduler_class(**_lowerCAmelCase )
UpperCAmelCase__ : int = 10
UpperCAmelCase__ : str = self.dummy_model()
UpperCAmelCase__ : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ : Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 79
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__lowerCAmelCase = "pytorch_model.bin"
__lowerCAmelCase = "pytorch_model.bin.index.json"
__lowerCAmelCase = "adapter_config.json"
__lowerCAmelCase = "adapter_model.bin"
__lowerCAmelCase = "adapter_model.safetensors"
__lowerCAmelCase = "tf_model.h5"
__lowerCAmelCase = "tf_model.h5.index.json"
__lowerCAmelCase = "model.ckpt"
__lowerCAmelCase = "flax_model.msgpack"
__lowerCAmelCase = "flax_model.msgpack.index.json"
__lowerCAmelCase = "model.safetensors"
__lowerCAmelCase = "model.safetensors.index.json"
__lowerCAmelCase = "config.json"
__lowerCAmelCase = "preprocessor_config.json"
__lowerCAmelCase = FEATURE_EXTRACTOR_NAME
__lowerCAmelCase = "generation_config.json"
__lowerCAmelCase = "modelcard.json"
__lowerCAmelCase = "▁"
__lowerCAmelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__lowerCAmelCase = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__lowerCAmelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__lowerCAmelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """simple docstring"""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            error_message = F'This example requires a minimum version of {min_version},'
        error_message += F' but the version found is {__version__}.\n'
        raise ImportError(
            error_message
            + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
            'versions of HuggingFace Transformers.')
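# Version strings must be compared numerically, not lexicographically, which is
# why the guard above goes through packaging.version (a quick illustration):
assert version.parse("4.9.0") < version.parse("4.31.0")
assert "4.9.0" > "4.31.0"  # naive string comparison gets this backwards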
| 536
| 0
|
'''simple docstring'''
import os
from collections.abc import Iterator
def UpperCAmelCase_ ( A = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A ):
_a : List[Any] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A )[1] in (".py", ".ipynb"):
yield os.path.join(A , A ).lstrip('./' )
def UpperCAmelCase_ ( A ):
'''simple docstring'''
return f'''{i * " "}*''' if i else "\n##"
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : int = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(A )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def UpperCAmelCase_ ( A = "." ):
'''simple docstring'''
_a : List[Any] = ''
for filepath in sorted(good_file_paths(A ) ):
_a , _a : Any = os.path.split(A )
if filepath != old_path:
_a : str = print_path(A , A )
_a : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
_a : Tuple = f'''{filepath}/{filename}'''.replace(' ' , '%20' )
_a : Optional[int] = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(f'''{md_prefix(A )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
| 424
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCAmelCase_ : str = TypeVar("T")
class LRUCache(Generic[T]):
    '''simple docstring'''
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        for k in self.dq_store:
            print(k)
    def __repr__(self) -> str:
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 424
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == '$':
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
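# Worked example (a minimal sketch, assuming decimal_to_binary above and plain
# int minterms): minterm 5 over three variables encodes, least-significant bit
# first, to '101'.
assert decimal_to_binary(3, [0, 5, 7]) == ['000', '101', '111']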
| 90
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
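# Sanity checks (assuming the functions above): F(12) = 144 is the first
# Fibonacci number with three digits, so solution(3) should report index 12.
assert fibonacci(12) == 144
assert solution(3) == 12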
| 90
| 1
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 214
|
import re
def _A(dna: str) -> str:
    """simple docstring"""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
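# Complement pairing check (assuming the translation above): A<->T and C<->G.
assert _A('ATCG') == 'TAGC'
assert _A('GTA') == 'CAT'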
| 214
| 1
|
from ..utils import DummyObject, requires_backends
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['flax'] )
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['flax'] )
@classmethod
def _UpperCAmelCase ( cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['flax'] )
| 23
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = None, scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", dropout: float = 0.0_5, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.0_2, use_cache: bool = True, attention_type: str = "prob", sampling_factor: int = 5, distil: bool = True, **kwargs, ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
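    # Rough arithmetic for the defaults (a sketch, not from the source): with
    # input_size=1, the default 7-element lags_sequence and no static/dynamic
    # extras, _number_of_features is 1 * 2 = 2, so feature_size = 1 * 7 + 2 = 9.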
| 95
| 0
|
"""simple docstring"""
def compute_ap(l) -> None:  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
A = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
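# For the adjacency list above, the articulation points are vertices 2, 3 and 5:
# removing any one of them disconnects part of the graph (e.g. dropping 3
# strands vertex 4), so compute_ap(data) is expected to print 2, 3 and 5.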
| 708
|
"""simple docstring"""
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        '''simple docstring'''
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        '''simple docstring'''
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
    def num_rows(self) -> int:
'''simple docstring'''
return len(self.rows )
@property
    def num_columns(self) -> int:
'''simple docstring'''
return len(self.rows[0] )
@property
    def order(self) -> tuple[int, int]:
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
    def is_square(self) -> bool:
'''simple docstring'''
return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        '''simple docstring'''
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
'''simple docstring'''
return bool(self.determinant() )
    def get_minor(self, row: int, column: int) -> int:
        '''simple docstring'''
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row: int, column: int) -> int:
        '''simple docstring'''
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        '''simple docstring'''
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self) -> Matrix:
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        '''simple docstring'''
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        '''simple docstring'''
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ) -> str:
'''simple docstring'''
return str(self.rows )
    def __str__(self) -> str:
        '''simple docstring'''
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ])
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        '''simple docstring'''
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        '''simple docstring'''
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        '''simple docstring'''
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self, other: object) -> bool:
        '''simple docstring'''
        return not self == other
def __neg__( self : List[Any] ) -> Matrix:
'''simple docstring'''
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
        '''simple docstring'''
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])
    def __sub__(self, other: Matrix) -> Matrix:
        '''simple docstring'''
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        '''simple docstring'''
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')
    def __pow__(self, other: int) -> Matrix:
        '''simple docstring'''
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        '''simple docstring'''
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
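# Quick determinant check (a minimal sketch, assuming the Matrix class above):
# for a 2x2 matrix the direct formula a*d - b*c applies, so 1*4 - 2*3 = -2.
assert Matrix([[1, 2], [3, 4]]).determinant() == -2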
| 109
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=1_8, min_resolution=3_0, max_resolution=4_0_0, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        size = size if size is not None else {"""shortest_edge""": 2_0}
        crop_size = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """crop_size""" ) )
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def lowerCamelCase_ ( self ):
pass
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCamelCase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCamelCase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 479
|
def surface_area_dodecahedron(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def volume_dodecahedron(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
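# --- Added illustration (not part of the original file): a minimal usage sketch ---
# surface area = 3*sqrt(25 + 10*sqrt(5)) * edge^2, volume = (15 + 7*sqrt(5))/4 * edge^3
if __name__ == "__main__":
    print(f"surface area, edge=5: {surface_area_dodecahedron(5):.4f}")  # ~516.1432
    print(f"volume, edge=5:       {volume_dodecahedron(5):.4f}")        # ~957.8899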
| 524
| 0
|
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """A matrix is Hermitian iff it equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient v* A v / v* v for a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
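# --- Added illustration (not part of the original file) ---
# For a Hermitian matrix the Rayleigh quotient is real and bounded by the
# extreme eigenvalues; a quick property check with numpy:
if __name__ == "__main__":
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    v = np.array([[1], [2], [3]])
    lo, hi = np.linalg.eigvalsh(a)[[0, -1]]
    assert lo <= rayleigh_quotient(a, v) <= hi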
| 714
|
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body; the sign of the velocity is irrelevant."""
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
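# --- Added illustration (not part of the original file) ---
# E_k = 0.5 * m * v**2; direction does not matter because speed enters squared:
if __name__ == "__main__":
    print(kinetic_energy(10, 10))   # 500.0
    print(kinetic_energy(10, -10))  # 500.0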
| 696
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 118
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
        'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMAEForPreTraining',
        'ViTMAELayer',
        'ViTMAEModel',
        'ViTMAEPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
        'TFViTMAEForPreTraining',
        'TFViTMAEModel',
        'TFViTMAEPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 118
| 1
|
def wave(txt: str) -> list[str]:
    """Return every variant of `txt` with exactly one alphabetic character upper-cased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 376
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144,
        num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5,
        layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,),
        input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum",
        ctc_zero_infinity=False, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 376
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset,
        eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 170
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='utf_8') as f:
        f_csv = csv.reader(f)
        output = []
        next(f_csv)  # skip the first line
        for line in tqdm(f_csv):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps', default=-1, type=int,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.',
    )
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info('  %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 170
| 1
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    print('Welcome!')
    yield
    print('Bye!')


@contextlib.contextmanager
def context_fr():
    print('Bonjour!')
    yield
    print('Au revoir!')
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])
    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])
    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 712
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin) primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
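# --- Added illustration (not part of the original file) ---
# Miller-Rabin is probabilistic: each random round exposes a composite with
# probability >= 3/4, so `prec` rounds make a false positive vanishingly rare.
#   is_prime_big(97)  -> True
#   is_prime_big(91)  -> False (91 = 7 * 13)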
| 406
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/bigbird-roberta-base')
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name='outputs', attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`,
        # so attention outputs cannot be compared against the PyTorch version.
        if name.startswith('outputs.attentions'):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 64
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the factor in Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the number of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))

    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
main()
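# --- Added note (not part of the original file) ---
# Newton's forward-difference formula: f(x0 + u*h) ~ y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + ...
# with u = (x - x0)/h. For y = x² sampled at x = 0, 1, 2, 3 the second differences
# are the constant 2, so the interpolation reproduces x² exactly at any point.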
| 246
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL only yields a redirect, so fetch that first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path like `tests/models/<model>/...`"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
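# --- Added note (not part of the original file; the script filename is illustrative) ---
# Typical invocation (run id and token are placeholders):
#   python get_ci_error_statistics.py --workflow_run_id 123456789 \
#       --output_dir ci_errors --token $GITHUB_TOKEN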
| 89
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ' include a `tokenizer_class` attribute is deprecated and will be '
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ',
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """Forwards the `audio` argument to the feature extractor and the `text` argument to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
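# --- Added illustration (not part of the original file) ---
# Minimal usage sketch (the checkpoint id is an assumption; any Wav2Vec2 CTC
# checkpoint that ships a processor config works):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids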
| 89
| 1
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0, a > d and 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 453
|
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)
@classmethod
def __magic_name__ ( cls , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__lowercase = []
else:
__lowercase = repr.split("""_""" )
__lowercase = {}
for value in values:
if "-" in value:
__lowercase , __lowercase = value.split("""-""" )
else:
__lowercase = re.sub("""[0-9.]""" , """""" , __UpperCAmelCase )
__lowercase = float(re.sub("""[^0-9.]""" , """""" , __UpperCAmelCase ) )
__lowercase = cls.NAMING_INFO["""reverse_short_param"""][p_k]
__lowercase = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__lowercase = cls.DEFAULTS[k]
return parameters
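# Minimal usage sketch (editor's addition; `_DemoNamer` is a hypothetical
# subclass, not part of the library). Short names are greedy prefixes, so the
# exact abbreviations depend on the order in which keys were registered.
class _DemoNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}


_name = _DemoNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
# batch_size matches its default, so only the learning rate is encoded:
assert _name == "run_lr0.0001"
assert _DemoNamer.parse_repr(_name) == {"learning_rate": 0.0001, "batch_size": 8}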
| 566
| 0
|
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams (ignoring case and whitespace)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment the count for one and decrement it for the other
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
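# Worked examples (editor's addition): "Silent" and "Listen" normalize to
# "silent"/"listen"; every letter's net count ends at zero, so they are anagrams.
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("This is a string", "Is this a string") is True
assert check_anagrams("There", "Their") is False  # e/e vs e/i differ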
| 702
|
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
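# Note (editor's addition): with the tester defaults above, the batched call in
# each test should yield pixel_values of shape
# (batch_size, num_frames, num_channels, crop_height, crop_width) = (7, 10, 3, 18, 18),
# regardless of whether the inputs are PIL images, NumPy arrays, or torch tensors.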
| 246
| 0
|
def to_upper_case(word: str) -> str:
    """Convert ASCII lowercase letters in `word` to uppercase, leaving other characters as-is."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
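# Example (editor's addition): ASCII lowercase letters sit exactly 32 code
# points above their uppercase counterparts, so ord("a") - 32 == ord("A");
# digits and punctuation pass through unchanged.
assert to_upper_case("wow, Python 3!") == "WOW, PYTHON 3!"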
| 204
|
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
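# Note (editor's addition): NOR is functionally complete; for example NOT can
# be built as nor_gate(a, a), so any Boolean function is expressible with NOR alone.
assert nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0  # NOT 0 == 1, NOT 1 == 0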
| 697
| 0
|
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that form between 1 and n_limit hollow square laminae."""
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
| 584
|
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
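# Note (editor's addition): both lists are sorted before comparison because a
# Kruskal implementation may emit the MST edges in any order; the tree's total
# weight here is 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.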
| 584
| 1
|