from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)


from manim import *
class lowercase_ ( Scene ): # obfuscated class name kept from the dump; the base must be a manim Scene
def construct( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
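# Render note (added): with the fixes above the class subclasses manim's Scene
# and defines construct(), the entry point manim calls when rendering. A scene
# like this is typically rendered from the CLI, e.g.:
#   manim -ql this_file.py lowercase_
# (`lowercase_` is the obfuscated class name kept from this dump.)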
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
config_name: Optional[str] = field(
default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
tokenizer_name: Optional[str] = field(
default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
cache_dir: Optional[str] = field(
default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
use_fast_tokenizer: bool = field(
default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
model_revision: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
use_auth_token: bool = field(
default=False , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class DataTrainingArguments:
train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
validation_file: Optional[str] = field(
default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
overwrite_cache: bool = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
preprocessing_num_workers: Optional[int] = field(
default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
max_seq_length: Optional[int] = field(
default=None , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
pad_to_max_length: bool = field(
default=False , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
max_train_samples: Optional[int] = field(
default=None , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
max_eval_samples: Optional[int] = field(
default=None , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __post_init__( self ):
if self.train_file is not None:
extension = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__( self , features ):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature.pop(label_name ) for feature in features]
batch_size = len(features )
num_choices = len(features[0]["input_ids"] )
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
]
flattened_features = list(chain(*flattened_features ) )
batch = self.tokenizer.pad(
flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
# Add back labels
batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
return batch
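# Shape note (added for clarity): for a batch of batch_size examples with
# num_choices endings each, the single tokenizer.pad call above pads the
# flattened batch_size * num_choices sequences, .view() restores tensors of
# shape (batch_size, num_choices, seq_len), and labels come back as a
# (batch_size,) int64 tensor.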
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
datasets.utils.logging.set_verbosity(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split("." )[-1]
raw_datasets = load_dataset(
extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [F"""ending{i}""" for i in range(4 )]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
max_seq_length = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(examples ):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
]
# Flatten out
first_sentences = list(chain(*first_sentences ) )
second_sentences = list(chain(*second_sentences ) )
# Tokenize
tokenized_examples = tokenizer(
first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
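# Data shape note (added for clarity): each SWAG example pairs one context
# ("sent1" plus the "sent2" header) with four candidate endings
# ("ending0".."ending3"); preprocess_function builds the four
# (context, context-with-ending) pairs per example, and the label is the
# index of the correct ending.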
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
train_dataset = train_dataset.map(
preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
eval_dataset = eval_dataset.select(range(max_eval_samples ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
eval_dataset = eval_dataset.map(
preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(eval_predictions ):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
trainer.log_metrics("train" , metrics )
trainer.save_metrics("train" , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
trainer.log_metrics("eval" , metrics )
trainer.save_metrics("eval" , metrics )
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()


import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum ):
LINEAR = 'linear'
COSINE = 'cosine'
COSINE_WITH_RESTARTS = 'cosine_with_restarts'
POLYNOMIAL = 'polynomial'
CONSTANT = 'constant'
CONSTANT_WITH_WARMUP = 'constant_with_warmup'
PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer , last_epoch = -1 ):
'''simple docstring'''
return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer , num_warmup_steps , last_epoch = -1 ):
'''simple docstring'''
def lr_lambda(current_step ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
return 1.0
return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer , step_rules , last_epoch = -1 ):
'''simple docstring'''
rules_dict = {}
rule_list = step_rules.split("," )
for rule_str in rule_list[:-1]:
value_str, value = rule_str.split(":" )
steps = int(value_str )
cur_lr_multiple = float(value )
rules_dict[steps] = cur_lr_multiple
last_lr_multiple = float(rule_list[-1] )
def create_rules_function(rules_dict , last_lr_multiple ):
def rule_func(steps ) -> float:
sorted_steps = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(sorted_steps ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
rules_func = create_rules_function(rules_dict , last_lr_multiple )
return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
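# step_rules format (added note): a rule string such as "1:10,0.1:20,0.01:30,0.005"
# means a multiplier of 1 until step 10, 0.1 until step 20, 0.01 until step 30,
# and 0.005 for every step after that.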
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
'''simple docstring'''
def lr_lambda(current_step ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
'''simple docstring'''
def lr_lambda(current_step ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
'''simple docstring'''
def lr_lambda(current_step ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
'''simple docstring'''
lr_init = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(current_step ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lr_range = lr_init - lr_end
decay_steps = num_training_steps - num_warmup_steps
pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
decay = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
'''simple docstring'''
name = SchedulerType(name )
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer , last_epoch=last_epoch )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
return schedule_func(
optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
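# Usage sketch (added; assumes an existing `model` and a torch import, which
# this module itself does not pull in):
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for step in range(10_000):
#       ...            # forward/backward
#       optimizer.step()
#       lr_scheduler.step()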
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig( PretrainedConfig ):
model_type = 'nllb-moe'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , vocab_size=128_112 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.router_z_loss_coef = router_z_loss_coef
self.router_aux_loss_coef = router_aux_loss_coef
self.decoder_sparse_step = decoder_sparse_step
self.encoder_sparse_step = encoder_sparse_step
self.num_experts = num_experts
self.expert_capacity = expert_capacity
self.router_bias = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
self.router_dtype = router_dtype
self.router_ignore_padding_tokens = router_ignore_padding_tokens
self.batch_prioritized_routing = batch_prioritized_routing
self.second_expert_policy = second_expert_policy
self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.moe_token_dropout = moe_token_dropout
self.output_router_logits = output_router_logits
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )


from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig( PretrainedConfig ):
model_type = 'roc_bert'
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24_858 , concat_input=True , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.enable_pronunciation = enable_pronunciation
self.enable_shape = enable_shape
self.pronunciation_embed_dim = pronunciation_embed_dim
self.pronunciation_vocab_size = pronunciation_vocab_size
self.shape_embed_dim = shape_embed_dim
self.shape_vocab_size = shape_vocab_size
self.concat_input = concat_input
self.position_embedding_type = position_embedding_type
self.classifier_dropout = classifier_dropout
super().__init__(pad_token_id=pad_token_id , **kwargs )
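# Instantiation sketch (added; defaults mirror the signature above):
#
#   config = RoCBertConfig()
#   assert config.model_type == "roc_bert"
#   assert config.hidden_size == 768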
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig( PretrainedConfig ):
model_type = 'open-llama'
def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.use_memory_efficient_attention = kwargs.pop(
"use_memorry_efficient_attention" , use_memory_efficient_attention ) # the misspelled key is kept for backward compatibility
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.use_stable_embedding = use_stable_embedding
self.shared_input_output_embedding = shared_input_output_embedding
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
def _rope_scaling_validation( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
f"""got {self.rope_scaling}""" )
rope_scaling_type = self.rope_scaling.get("type" , None )
rope_scaling_factor = self.rope_scaling.get("factor" , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )


from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img ):
'''simple docstring'''
height, width = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(height ):
for j in range(width ):
img[i][j] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
img = imread('image_data/lena.jpg', 1)
# convert to its negative
img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
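# Vectorized alternative (added sketch): NumPy broadcasting inverts the whole
# image in one step and avoids the per-pixel Python loop above.
#
#   import numpy as np
#   def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
#       return 255 - img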
import operator as op
def solve(post_fix ):
'''simple docstring'''
stack = []
div = lambda x, y: int(x / y ) # noqa: E731 integer division operation
opr = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(__lowercase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
else:
b = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
a = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
stack.append(
str(opr[x](int(__lowercase ) , int(__lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " , )
return int(stack[0] )
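# Worked example (added for clarity): the postfix form of (5 + 3) * 2 is
# "5 3 + 2 *", so solve(["5", "3", "+", "2", "*"]) prints the trace table and
# returns 16.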
if __name__ == "__main__":
Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))


import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any] # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
atom_positions: np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
aatype: np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
atom_mask: np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
residue_index: np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
b_factors: np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
chain_index: Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
remark: Optional[str] = None
# Templates used to generate this protein (prediction-only)
parents: Optional[Sequence[str]] = None
# Chain corresponding to each parent
parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str ) -> Protein:
'''simple docstring'''
tag_re = r"(\[[A-Z]+\]\n)"
tags = [tag.strip() for tag in re.split(tag_re , proteinnet_str ) if len(tag ) > 0]
groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
atoms: List[str] = ["N", "CA", "C"]
aatype = None
atom_positions = None
atom_mask = None
for g in groups:
if "[PRIMARY]" == g[0]:
seq = g[1][0].strip()
for i in range(len(seq ) ):
if seq[i] not in residue_constants.restypes:
seq[i] = "X" # FIXME: strings are immutable
aatype = np.array(
[residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
tertiary: List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(float , g[1][axis].split() ) ) )
tertiary_np = np.array(tertiary )
atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
for i, atom in enumerate(atoms ):
atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
mask = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
atom_mask = np.zeros(
(
len(mask ),
residue_constants.atom_type_num,
) ).astype(np.float32 )
for i, atom in enumerate(atoms ):
atom_mask[:, residue_constants.atom_order[atom]] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
def get_pdb_headers(prot , chain_id = 0 ) -> List[str]:
'''simple docstring'''
pdb_headers: List[str] = []
remark = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
parents = prot.parents
parents_chain_index = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
parents = [p for i, p in zip(parents_chain_index , parents ) if i == chain_id]
if parents is None or len(parents ) == 0:
parents = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(parents )}""" )
return pdb_headers
def add_pdb_headers(prot , pdb_str ) -> str:
'''simple docstring'''
out_pdb_lines: List[str] = []
lines = pdb_str.split("\n" )
remark = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
parents_per_chain: List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
parents_per_chain = []
if prot.parents_chain_index is not None:
parent_dict: Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(i ) , [] )
parent_dict[str(i )].append(p )
max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
chain_parents = parent_dict.get(str(i ) , ["N/A"] )
parents_per_chain.append(chain_parents )
else:
parents_per_chain.append(list(prot.parents ) )
else:
parents_per_chain = [["N/A"]]
def make_parent_line(p ) -> str:
return F"""PARENT {' '.join(p )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
chain_counter = 0
for i, l in enumerate(lines ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(l )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(parents_per_chain ):
chain_parents = parents_per_chain[chain_counter]
else:
chain_parents = ["N/A"]
out_pdb_lines.append(make_parent_line(chain_parents ) )
return "\n".join(out_pdb_lines )
def to_pdb(prot ) -> str:
'''simple docstring'''
restypes = residue_constants.restypes + ["X"]
def res_1to3(r ) -> str:
return residue_constants.restype_1to3.get(restypes[r] , "UNK" )
atom_types = residue_constants.atom_types
pdb_lines: List[str] = []
atom_mask = prot.atom_mask
aatype = prot.aatype
atom_positions = prot.atom_positions
residue_index = prot.residue_index.astype(np.int32 )
b_factors = prot.b_factors
chain_index = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
headers = get_pdb_headers(prot )
if len(headers ) > 0:
pdb_lines.extend(headers )
n = aatype.shape[0]
atom_index = 1
prev_chain_index = 0
chain_tags = string.ascii_uppercase
chain_tag = None
# Add all atom sites.
for i in range(n ):
res_name_3 = res_1to3(aatype[i] )
for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
record_type = "ATOM"
name = atom_name if len(atom_name ) == 4 else F""" {atom_name}"""
alt_loc = ""
insertion_code = ""
occupancy = 1.00
element = atom_name[0] # Protein supports only C, N, O, S, this works.
charge = ""
chain_tag = "A"
if chain_index is not None:
chain_tag = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
atom_line = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_3:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(atom_line )
atom_index += 1
should_terminate = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
should_terminate = True
prev_chain_index = chain_index[i + 1]
if should_terminate:
# Close the chain.
chain_end = "TER"
chain_termination_line = (
F"""{chain_end:<6}{atom_index:>5} {res_1to3(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(chain_termination_line )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(pdb_lines )
def ideal_atom_mask(prot ) -> np.ndarray:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features , result , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features ):
'''simple docstring'''
assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
'''simple docstring'''
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
_check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features , parquet_path , tmp_path ):
'''simple docstring'''
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
_check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split , parquet_path , tmp_path ):
'''simple docstring'''
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
_check_parquet_dataset(dataset , expected_features )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type , parquet_path , tmp_path ):
'''simple docstring'''
if issubclass(path_type , str ):
path = parquet_path
elif issubclass(path_type , list ):
path = [parquet_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
_check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) ):
'''simple docstring'''
assert isinstance(dataset_dict , DatasetDict )
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
'''simple docstring'''
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
_check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path ):
'''simple docstring'''
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = ParquetDatasetReader({"train": parquet_path} , features=features , cache_dir=cache_dir ).read()
_check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split , parquet_path , tmp_path ):
'''simple docstring'''
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
_check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_writer_write(dataset , tmp_path ):
'''simple docstring'''
writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet" )
assert writer.write() > 0
pf = pq.ParquetFile(tmp_path / "foo.parquet" )
output_table = pf.read()
assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir , tmp_path ):
'''simple docstring'''
image_path = str(shared_datadir / "test_image_rgb.jpg" )
data = {"image": [image_path]}
features = Features({"image": Image()} )
dataset = Dataset.from_dict(data , features=features )
writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet" )
assert writer.write() > 0
reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=True ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature , expected ):
'''simple docstring'''
assert get_writer_batch_size(feature ) == expected


from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor( ProcessorMixin ):
attributes = ['image_processor']
image_processor_class = 'SamImageProcessor'
def __init__( self , image_processor ):
super().__init__(image_processor )
self.current_processor = self.image_processor
self.point_pad_value = -10
self.target_size = self.image_processor.size["longest_edge"]
def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors = None , **kwargs , ):
encoding_image_processor = self.image_processor(
images , return_tensors=return_tensors , **kwargs , )
# pop arguments that are not used in the forward but used nevertheless
original_sizes = encoding_image_processor["original_sizes"]
if hasattr(original_sizes , "numpy" ): # Checks if Torch or TF tensor
original_sizes = original_sizes.numpy()
input_points, input_labels, input_boxes = self._check_and_preprocess_points(
input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
encoding_image_processor = self._normalize_and_convert(
encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
return encoding_image_processor
def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , ):
if input_points is not None:
if len(original_sizes ) != len(input_points ):
input_points = [
self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
]
else:
input_points = [
self._normalize_coordinates(self.target_size , point , original_size )
for point, original_size in zip(input_points , original_sizes )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
input_points, input_labels = self._pad_points_and_labels(input_points , input_labels )
input_points = np.array(input_points )
if input_labels is not None:
input_labels = np.array(input_labels )
if input_boxes is not None:
if len(original_sizes ) != len(input_boxes ):
input_boxes = [
self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
for box in input_boxes
]
else:
input_boxes = [
self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
for box, original_size in zip(input_boxes , original_sizes )
]
input_boxes = np.array(input_boxes )
if input_boxes is not None:
if return_tensors == "pt":
input_boxes = torch.from_numpy(input_boxes )
# boxes batch size of 1 by default
input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
input_boxes = tf.convert_to_tensor(input_boxes )
# boxes batch size of 1 by default
input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
input_points = torch.from_numpy(input_points )
# point batch size of 1 by default
input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
input_points = tf.convert_to_tensor(input_points )
# point batch size of 1 by default
input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
input_labels = torch.from_numpy(input_labels )
# point batch size of 1 by default
input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
input_labels = tf.convert_to_tensor(input_labels )
# point batch size of 1 by default
input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ ) | 670 | 1 |
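# Editor's note: a minimal, self-contained sketch of the coordinate rescaling the
# normalization method above performs. The name `normalize_coords` and the
# "resize the longest edge to target_size" rule are assumptions for illustration,
# not the library's public API.
import numpy as np

def normalize_coords(coords, original_size, target_size=1_024):
    # Rescale (x, y) coordinates from the original image size to the resized
    # image the model sees, where the longest edge becomes `target_size`.
    old_h, old_w = original_size
    scale = target_size / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = np.asarray(coords, dtype=float).copy()
    out[..., 0] *= new_w / old_w  # x follows the width scale
    out[..., 1] *= new_h / old_h  # y follows the height scale
    return out

# e.g. the point (250, 125) in an image of height 500 and width 250 maps to
# (512.0, 256.0) once the longest edge is resized to 1024.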
from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_snake_case : Union[str, Any] = grid[0]
for row_n in range(1 , len(__lowercase ) ):
_snake_case : Union[str, Any] = grid[row_n]
_snake_case : List[Any] = fill_row(__lowercase , __lowercase )
_snake_case : List[Any] = grid[row_n]
return grid[-1][-1]
def snake_case (__lowercase , __lowercase ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
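# Editor's note: a standalone re-statement of the dynamic program above with
# readable names, to make the recurrence explicit; `min_path_sum` is an
# illustrative name, not one used by the snippet.
def min_path_sum(grid):
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    rows, cols = len(grid), len(grid[0])
    dp = [0] * cols  # dp[c]: cheapest cost of reaching column c in the current row
    for r in range(rows):
        for c in range(cols):
            if r == 0 and c == 0:
                dp[c] = grid[0][0]
            elif r == 0:
                dp[c] = dp[c - 1] + grid[r][c]  # only a step right is possible
            elif c == 0:
                dp[c] = dp[c] + grid[r][c]  # only a step down is possible
            else:
                dp[c] = min(dp[c], dp[c - 1]) + grid[r][c]
    return dp[-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1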
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None ):
_snake_case : Any = data
_snake_case : Dict = previous
_snake_case : str = next_node
def __str__( self ):
return f"""{self.data}"""
def UpperCamelCase ( self ):
return self.data
def UpperCamelCase ( self ):
return self.next
def UpperCamelCase ( self ):
return self.previous
class lowercase_ :
def __init__( self , lowercase_ ):
_snake_case : List[str] = head
def __iter__( self ):
return self
def UpperCamelCase ( self ):
if not self.current:
raise StopIteration
else:
_snake_case : Optional[Any] = self.current.get_data()
_snake_case : int = self.current.get_next()
return value
class lowercase_ :
def __init__( self ):
_snake_case : Dict = None # First node in list
_snake_case : Union[str, Any] = None # Last node in list
def __str__( self ):
_snake_case : int = self.head
_snake_case : List[str] = []
while current is not None:
nodes.append(current.get_data() )
_snake_case : Dict = current.get_next()
return " ".join(str(lowercase_ ) for node in nodes )
def __contains__( self , lowercase_ ):
_snake_case : Tuple = self.head
while current:
if current.get_data() == value:
return True
_snake_case : List[Any] = current.get_next()
return False
def __iter__( self ):
return LinkedListIterator(self.head )
def UpperCamelCase ( self ):
if self.head:
return self.head.get_data()
return None
def UpperCamelCase ( self ):
if self.tail:
return self.tail.get_data()
return None
def UpperCamelCase ( self , lowercase_ ):
if self.head is None:
_snake_case : Dict = node
_snake_case : Optional[Any] = node
else:
self.insert_before_node(self.head , lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
if self.head is None:
self.set_head(lowercase_ )
else:
self.insert_after_node(self.tail , lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Optional[int] = Node(lowercase_ )
if self.head is None:
self.set_head(lowercase_ )
else:
self.set_tail(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Optional[int] = node
_snake_case : Union[str, Any] = node.previous
if node.get_previous() is None:
_snake_case : Any = node_to_insert
else:
_snake_case : str = node_to_insert
_snake_case : Union[str, Any] = node_to_insert
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Tuple = node
_snake_case : Any = node.next
if node.get_next() is None:
_snake_case : Optional[int] = node_to_insert
else:
_snake_case : str = node_to_insert
_snake_case : Optional[int] = node_to_insert
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = 1
_snake_case : List[Any] = Node(lowercase_ )
_snake_case : Union[str, Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase_ , lowercase_ )
return
current_position += 1
_snake_case : Optional[Any] = node.next
self.insert_after_node(self.tail , lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Any = self.head
while node:
if node.get_data() == item:
return node
_snake_case : int = node.get_next()
raise Exception("Node not found" )
def UpperCamelCase ( self , lowercase_ ):
if (node := self.get_node(lowercase_ )) is not None:
if node == self.head:
_snake_case : Any = self.head.get_next()
if node == self.tail:
_snake_case : Tuple = self.tail.get_previous()
self.remove_node_pointers(lowercase_ )
@staticmethod
def UpperCamelCase ( lowercase_ ):
if node.get_next():
_snake_case : Any = node.previous
if node.get_previous():
_snake_case : Tuple = node.next
_snake_case : Union[str, Any] = None
_snake_case : List[Any] = None
def UpperCamelCase ( self ):
return self.head is None
def snake_case () -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(__lowercase ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# the index falls within the pivot's positions, so the pivot is the answer
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | 1 |
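# Editor's note: a hedged usage sketch of the quickselect above. The recursive
# calls inside the snippet name the function `quick_select`, so that
# de-obfuscated name is used here; expected outputs are shown as comments:
#
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # -> 54  (sorted: [2, 4, 5, 7, 32, 54, 899])
#   quick_select([2, 4, 5, 7, 899, 54, 32], 0)  # -> 2   (the minimum)
#
# The pivot is chosen at random, so the running time is expected O(n) rather
# than worst-case.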
from collections import defaultdict
class lowercase_ :
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : Optional[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_snake_case : Tuple = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowercase_ ) )
]
_snake_case : Union[str, Any] = defaultdict(lowercase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_snake_case : Optional[Any] = (1 << len(lowercase_ )) - 1
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
# if mask == self.final_mask, every person has been assigned a task, so return 1
if mask == self.final_mask:
return 1
# if tasks have run out before everyone was assigned one, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we do not assign this task to anyone in the arrangement
_snake_case : int = self.count_ways_until(lowercase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_snake_case : str = total_ways_util
return self.dp[mask][task_no]
def UpperCamelCase ( self , lowercase_ ):
# Store the list of persons for each task
for i in range(len(lowercase_ ) ):
for j in task_performed[i]:
self.task[j].append(lowercase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 670 | from math import pow, sqrt
def snake_case (*__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
) | 670 | 1 |
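# Editor's note: a worked example of Graham's law as encoded above
# (rate_1 / rate_2 = sqrt(M_2 / M_1)). The molar masses M(H2) = 2.016 and
# M(O2) = 32.00 g/mol are illustrative inputs, not values from the snippet.
from math import sqrt

ratio = round(sqrt(32.00 / 2.016), 6)  # effusion-rate ratio of H2 relative to O2
assert abs(ratio - 3.984095) < 1e-6  # hydrogen effuses roughly 4x faster than oxygen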
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'OwlViTImageProcessor'
_lowerCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : str = kwargs.pop("feature_extractor" )
_snake_case : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="max_length" , lowercase_="np" , **lowercase_ ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(lowercase_ , lowercase_ ) or (isinstance(lowercase_ , lowercase_ ) and not isinstance(text[0] , lowercase_ )):
_snake_case : List[Any] = [self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ , **lowercase_ )]
elif isinstance(lowercase_ , lowercase_ ) and isinstance(text[0] , lowercase_ ):
_snake_case : str = []
# Maximum number of queries across batch
_snake_case : Tuple = max([len(lowercase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowercase_ ) != max_num_queries:
_snake_case : Optional[int] = t + [" "] * (max_num_queries - len(lowercase_ ))
_snake_case : Optional[int] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
encodings.append(lowercase_ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_snake_case : List[Any] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_snake_case : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_snake_case : Any = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_snake_case : List[str] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_snake_case : List[str] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_snake_case : int = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_snake_case : Optional[int] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_snake_case : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_snake_case : Optional[int] = BatchEncoding()
_snake_case : List[str] = input_ids
_snake_case : str = attention_mask
if query_images is not None:
_snake_case : List[str] = BatchEncoding()
_snake_case : Any = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ ).pixel_values
_snake_case : List[Any] = query_pixel_values
if images is not None:
_snake_case : Union[str, Any] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_object_detection(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_image_guided_detection(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
@property
def UpperCamelCase ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
return self.image_processor | 670 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ ) | 670 | 1 |
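# Editor's note: a minimal sketch of the deprecation-shim pattern the snippet
# above follows -- keep the old class importable, warn on construction, and
# delegate everything to the replacement. The class names here are hypothetical.
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldImageProcessor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldImageProcessor is deprecated and will be removed in a future"
            " version. Please use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)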
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def snake_case (__lowercase , __lowercase ) -> str | Literal[False]:
'''simple docstring'''
_snake_case : Dict = list(__lowercase )
_snake_case : str = list(__lowercase )
_snake_case : Dict = 0
for i in range(len(__lowercase ) ):
if lista[i] != lista[i]:
count += 1
_snake_case : Any = "_"
if count > 1:
return False
else:
return "".join(__lowercase )
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
_snake_case : Optional[Any] = []
while True:
_snake_case : Any = ["$"] * len(__lowercase )
_snake_case : Optional[int] = []
for i in range(len(__lowercase ) ):
for j in range(i + 1 , len(__lowercase ) ):
_snake_case : int = compare_string(binary[i] , binary[j] )
if k is False:
_snake_case : List[str] = "*"
_snake_case : Optional[Any] = "*"
temp.append("X" )
for i in range(len(__lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__lowercase ) == 0:
return pi
_snake_case : str = list(set(__lowercase ) )
def snake_case (__lowercase , __lowercase ) -> list[str]:
'''simple docstring'''
_snake_case : List[str] = []
for minterm in minterms:
_snake_case : List[Any] = ""
for _ in range(__lowercase ):
_snake_case : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(__lowercase )
return temp
def snake_case (__lowercase , __lowercase , __lowercase ) -> bool:
'''simple docstring'''
_snake_case : List[str] = list(__lowercase )
_snake_case : List[str] = list(__lowercase )
_snake_case : Dict = 0
for i in range(len(__lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def snake_case (__lowercase , __lowercase ) -> list[str]:
'''simple docstring'''
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = [0] * len(__lowercase )
for i in range(len(chart[0] ) ):
_snake_case : str = 0
_snake_case : List[Any] = -1
for j in range(len(__lowercase ) ):
if chart[j][i] == 1:
count += 1
_snake_case : List[str] = j
if count == 1:
_snake_case : Tuple = 1
for i in range(len(__lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__lowercase ) ):
_snake_case : str = 0
temp.append(prime_implicants[i] )
while True:
_snake_case : List[Any] = 0
_snake_case : Union[str, Any] = -1
_snake_case : Optional[int] = 0
for i in range(len(__lowercase ) ):
_snake_case : int = chart[i].count(1 )
if count_n > max_n:
_snake_case : List[Any] = count_n
_snake_case : str = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__lowercase ) ):
_snake_case : str = 0
def snake_case (__lowercase , __lowercase ) -> list[list[int]]:
'''simple docstring'''
_snake_case : str = [[0 for x in range(len(__lowercase ) )] for x in range(len(__lowercase ) )]
for i in range(len(__lowercase ) ):
_snake_case : Optional[int] = prime_implicants[i].count("_" )
for j in range(len(__lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , __lowercase ):
_snake_case : Any = 1
return chart
def snake_case () -> None:
'''simple docstring'''
_snake_case : List[Any] = int(input("Enter the no. of variables\n" ) )
_snake_case : Tuple = [
int(__lowercase )  # minterms must be integers for decimal_to_binary's modulo/floor-division loop
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_snake_case : Dict = decimal_to_binary(__lowercase , __lowercase )
_snake_case : str = check(__lowercase )
print("Prime Implicants are:" )
print(__lowercase )
_snake_case : int = prime_implicant_chart(__lowercase , __lowercase )
_snake_case : Any = selection(__lowercase , __lowercase )
print("Essential Prime Implicants are:" )
print(__lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 670 | from __future__ import annotations
from typing import TypedDict
class lowercase_ ( __snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def snake_case (__lowercase ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case : List[str] = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case : Union[str, Any] = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | 1 |
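# Editor's note: a usage sketch of the Burrows-Wheeler routines above, using the
# names the interactive block already calls (`bwt_transform` / `reverse_bwt`):
#
#   result = bwt_transform("banana")
#   # result == {"bwt_string": "nnbaaa", "idx_original_string": 3}
#   reverse_bwt(result["bwt_string"], result["idx_original_string"])
#   # -> "banana"
#
# The transform clusters equal characters together ("nn...aaa"), which is why
# BWT output compresses well with run-length or move-to-front coders.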
from math import pow, sqrt
def snake_case (*__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
) | 670 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 1 |
import operator
def snake_case (__lowercase , __lowercase = False , __lowercase = None ) -> list:
'''simple docstring'''
_snake_case : int = operator.lt if reverse else operator.gt
_snake_case : Optional[int] = solution or []
if not arr:
return solution
_snake_case : Any = [arr.pop(0 )]
for i, item in enumerate(__lowercase ):
if _operator(__lowercase , sublist[-1] ):
sublist.append(__lowercase )
arr.pop(__lowercase )
# merging sublist into solution list
if not solution:
solution.extend(__lowercase )
else:
while sublist:
_snake_case : List[str] = sublist.pop(0 )
for i, xx in enumerate(__lowercase ):
if not _operator(__lowercase , __lowercase ):
solution.insert(__lowercase , __lowercase )
break
else:
solution.append(__lowercase )
strand_sort(__lowercase , __lowercase , __lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 670 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
_snake_case : Optional[int] = parent
_snake_case : str = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Optional[Any] = use_labels
_snake_case : Tuple = vocab_size
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Union[str, Any] = eos_token_id
_snake_case : str = pad_token_id
_snake_case : Any = bos_token_id
_snake_case : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : List[Any] = self.attention_window + 2
# because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase ( self ):
_snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
_snake_case : int = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
_snake_case : List[Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
_snake_case : Optional[Any] = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : int = inputs_dict["attention_mask"][:1, :]
_snake_case : int = 1
# first forward pass
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFLEDModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# TODO: Head-masking not yet implemented
pass
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
return tf.constant(__lowercase , dtype=tf.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | 1 |
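# Editor's note: a small runnable sketch (illustrative shapes only) of the
# tf.where idiom the test above uses to mark global-attention positions for LED:
# the first `num_global` token positions get 1, the rest keep the base mask.
import tensorflow as tf

seq_length, num_global = 8, 2
base_mask = tf.zeros((1, seq_length), dtype=tf.int32)
global_mask = tf.where(tf.range(seq_length)[None, :] < num_global, 1, base_mask)
# global_mask -> [[1, 1, 0, 0, 0, 0, 0, 0]]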
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__SCREAMING_SNAKE_CASE : str = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def snake_case (__lowercase ) -> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowercase )
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
_snake_case : Optional[Any] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__lowercase , id=__lowercase ) | 670 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 1 |
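# Editor's note: a tiny runnable illustration of the SentencePiece convention the
# assertions above rely on: word-initial pieces carry the U+2581 marker, so
# joining the pieces and swapping the marker for a space recovers the text.
SPIECE_UNDERLINE = "\u2581"
pieces = [SPIECE_UNDERLINE + "This", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "a", SPIECE_UNDERLINE + "t", "est"]
assert "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip() == "This is a test"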
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def UpperCamelCase ( self ):
_snake_case : List[str] = self.dummy_uncond_unet
_snake_case : Union[str, Any] = PNDMScheduler()
_snake_case : Optional[Any] = PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
pndm.to(lowercase_ )
pndm.set_progress_bar_config(disable=lowercase_ )
_snake_case : Dict = torch.manual_seed(0 )
_snake_case : Union[str, Any] = pndm(generator=lowercase_ , num_inference_steps=20 , output_type="numpy" ).images
_snake_case : Union[str, Any] = torch.manual_seed(0 )
_snake_case : Union[str, Any] = pndm(generator=lowercase_ , num_inference_steps=20 , output_type="numpy" , return_dict=lowercase_ )[0]
_snake_case : Optional[int] = image[0, -3:, -3:, -1]
_snake_case : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : Any = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : List[Any] = "google/ddpm-cifar10-32"
_snake_case : Optional[int] = UNetaDModel.from_pretrained(lowercase_ )
_snake_case : Optional[int] = PNDMScheduler()
_snake_case : Tuple = PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
pndm.to(lowercase_ )
pndm.set_progress_bar_config(disable=lowercase_ )
_snake_case : List[str] = torch.manual_seed(0 )
_snake_case : List[Any] = pndm(generator=lowercase_ , output_type="numpy" ).images
_snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : List[str] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 670 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | 1 |
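# A hedged save/load round-trip sketch of the pattern the processor tests above
# exercise. The checkpoint name "CIDAS/clipseg-rd64-refined" is an assumption
# (any CLIPSeg processor checkpoint would do) and Hub access is required.
import tempfile

from transformers import CLIPSegProcessor

with tempfile.TemporaryDirectory() as tmpdir:
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    processor.save_pretrained(tmpdir)  # writes tokenizer and image processor files
    reloaded = CLIPSegProcessor.from_pretrained(tmpdir)
    # the reloaded tokenizer vocabulary matches the original, as asserted above
    assert processor.tokenizer.get_vocab() == reloaded.tokenizer.get_vocab()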
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = ['CLIPFeatureExtractor']
__SCREAMING_SNAKE_CASE : List[str] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_snake_case : Optional[int] = PipelineDataFormat.from_str(
format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__lowercase , __lowercase )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : str = nlp
_snake_case : str = reader
@staticmethod
def UpperCamelCase ( lowercase_ ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = self._nlp, []
for entry in self._reader:
_snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : str = self._reader.save_binary(lowercase_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowercase_ ) | 670 | 1 |
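# A self-contained sketch of the argparse wiring used above: the subcommand
# registers a factory via set_defaults(func=...), so parsing yields an object
# that builds and runs the command. All names and argument values below are
# illustrative stand-ins, not taken from the file.
from argparse import ArgumentParser


def run_factory(args):
    # stand-in for the factory above, which builds a pipeline and a data reader
    return f"would run task={args.task!r} on {args.input!r}"


parser = ArgumentParser("cli")
subparsers = parser.add_subparsers()
run_parser = subparsers.add_parser("run")
run_parser.add_argument("--task")
run_parser.add_argument("--input")
run_parser.set_defaults(func=run_factory)

parsed = parser.parse_args(["run", "--task", "ner", "--input", "data.csv"])
print(parsed.func(parsed))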
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<sep>" , lowercase_="<pad>" , lowercase_="<cls>" , lowercase_="<mask>" , lowercase_=["<eop>", "<eod>"] , lowercase_ = None , **lowercase_ , ):
_snake_case : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
_snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
_snake_case : int = 3
_snake_case : Any = do_lower_case
_snake_case : Any = remove_space
_snake_case : str = keep_accents
_snake_case : Any = vocab_file
_snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
_snake_case : List[str] = jieba
_snake_case : Dict = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCamelCase ( self ):
return len(self.sp_model )
def UpperCamelCase ( self ):
_snake_case : str = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_snake_case : Optional[int] = self.__dict__.copy()
_snake_case : str = None
return state
def __setstate__( self , lowercase_ ):
_snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self , lowercase_ ):
if self.remove_space:
_snake_case : str = " ".join(inputs.strip().split() )
else:
_snake_case : Optional[int] = inputs
_snake_case : Dict = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_snake_case : Any = unicodedata.normalize("NFKD" , lowercase_ )
_snake_case : List[str] = "".join([c for c in outputs if not unicodedata.combining(lowercase_ )] )
if self.do_lower_case:
_snake_case : Optional[int] = outputs.lower()
return outputs
def UpperCamelCase ( self , lowercase_ ):
_snake_case : List[str] = self.preprocess_text(lowercase_ )
_snake_case : Tuple = self.sp_model.encode(lowercase_ , out_type=lowercase_ )
_snake_case : List[str] = []
for piece in pieces:
if len(lowercase_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_snake_case : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_snake_case : Optional[Any] = cur_pieces[1:]
else:
_snake_case : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase_ )
else:
new_pieces.append(lowercase_ )
return new_pieces
def UpperCamelCase ( self , lowercase_ ):
return self.sp_model.PieceToId(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
return self.sp_model.IdToPiece(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Union[str, Any] = "".join(lowercase_ ).replace(lowercase_ , " " ).strip()
return out_string
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is not None:
return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1, 1]
return ([0] * len(lowercase_ )) + [1, 1]
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : Dict = [self.sep_token_id]
_snake_case : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case : List[str] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , "wb" ) as fi:
_snake_case : Dict = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
_snake_case : Optional[Any] = super()._decode(*lowercase_ , **lowercase_ )
_snake_case : Union[str, Any] = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text | 670 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : List[str] = nn.ModuleList(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
_snake_case ,_snake_case : Optional[int] = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
_snake_case ,_snake_case : Tuple = down_samples, mid_sample
else:
_snake_case : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
_snake_case : Tuple = 0
_snake_case : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
_snake_case : int = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
_snake_case : List[str] = 0
_snake_case : Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case : Optional[Any] = pretrained_model_path
while os.path.isdir(lowercase_ ):
_snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
_snake_case : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(lowercase_ ) | 670 | 1 |
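# A pure-path sketch of the directory convention implemented above: saving
# writes the first net to <dir> and suffixes the following ones, and loading
# walks the same suffixes until a directory is missing. Note the suffixes
# compound (<dir>, <dir>_1, <dir>_1_2, ...) because both loops append to the
# previous path. Directory names are hypothetical; no checkpoints are touched.
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    base = os.path.join(root, "controlnet")
    os.makedirs(base)          # pretend the first controlnet was saved here
    os.makedirs(base + "_1")   # and the second one here
    found, idx, path = [], 0, base
    while os.path.isdir(path):  # the same walk as from_pretrained above
        found.append(path)
        idx += 1
        path = path + f"_{idx}"
    print(len(found))  # 2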
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'CLIPImageProcessor'
_lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : Dict = kwargs.pop("feature_extractor" )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
_snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = VQModel
_lowerCamelCase = 'sample'
@property
def UpperCamelCase ( self , lowercase_=(32, 32) ):
_snake_case : int = 4
_snake_case : int = 3
_snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ )
return {"sample": image}
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
return (3, 32, 32)
def UpperCamelCase ( self ):
_snake_case : Optional[int] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_snake_case : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowercase_ )
_snake_case : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase ( self ):
_snake_case : Any = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(lowercase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_snake_case : Dict = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_snake_case : List[Any] = image.to(lowercase_ )
with torch.no_grad():
_snake_case : Dict = model(lowercase_ ).sample
_snake_case : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_snake_case : Tuple = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) ) | 670 |
from __future__ import annotations
def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
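# A readable restatement of the solver above with descriptive parameter names
# (the placeholder signature is not directly callable): exactly one quantity is
# passed as 0 and Ohm's law (V = I * R) is solved for it.
def ohms_law(voltage: float, current: float, resistance: float) -> dict:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if voltage == 0:
        return {"voltage": current * resistance}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}


print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}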
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def snake_case (__lowercase , __lowercase=False , __lowercase=False , __lowercase=False ) -> Dict:
'''simple docstring'''
_snake_case : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def snake_case (__lowercase , __lowercase ) -> int:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_snake_case : Optional[int] = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_snake_case : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[
: config.hidden_size, :
]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : List[str] = in_proj_bias[-config.hidden_size :]
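# Shape-only illustration of the slicing performed above: a fused attention
# projection of shape (3 * hidden, hidden) splits into equal query, key and
# value blocks. Plain torch with a hypothetical hidden size.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : hidden_size * 2, :]
v = in_proj_weight[-hidden_size:, :]
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)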
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : List[Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
_snake_case : Tuple = dct.pop(__lowercase )
_snake_case : Optional[Any] = val
@torch.no_grad()
def snake_case (__lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_snake_case : Dict = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__lowercase )
_snake_case : Tuple = False
_snake_case : str = False
_snake_case : List[str] = False
_snake_case : Any = False
if "vqa" in checkpoint_url:
_snake_case : Dict = True
_snake_case : Union[str, Any] = 3_129
_snake_case : List[Any] = "huggingface/label-files"
_snake_case : Optional[Any] = "vqa2-id2label.json"
_snake_case : Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_snake_case : Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case : Dict = idalabel
_snake_case : int = {v: k for k, v in idalabel.items()}
_snake_case : Optional[int] = ViltForQuestionAnswering(__lowercase )
elif "nlvr" in checkpoint_url:
_snake_case : Dict = True
_snake_case : List[Any] = 2
_snake_case : Optional[int] = {0: "False", 1: "True"}
_snake_case : Optional[Any] = {v: k for k, v in config.idalabel.items()}
_snake_case : Optional[int] = 3
_snake_case : Optional[Any] = ViltForImagesAndTextClassification(__lowercase )
elif "irtr" in checkpoint_url:
_snake_case : Dict = True
_snake_case : Union[str, Any] = ViltForImageAndTextRetrieval(__lowercase )
elif "mlm_itm" in checkpoint_url:
_snake_case : Any = True
_snake_case : Dict = ViltForMaskedLM(__lowercase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_snake_case : List[Any] = torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" )["state_dict"]
_snake_case : str = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase )
if mlm_model or irtr_model:
_snake_case : Optional[Any] = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_snake_case ,_snake_case : int = model.load_state_dict(__lowercase , strict=__lowercase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowercase )
# Define processor
_snake_case : Optional[int] = ViltImageProcessor(size=384 )
_snake_case : Tuple = BertTokenizer.from_pretrained("bert-base-uncased" )
_snake_case : Dict = ViltProcessor(__lowercase , __lowercase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_snake_case : List[Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__lowercase ).raw )
_snake_case : str = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__lowercase ).raw )
_snake_case : Any = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_snake_case : Optional[Any] = processor(__lowercase , __lowercase , return_tensors="pt" )
_snake_case : int = processor(__lowercase , __lowercase , return_tensors="pt" )
_snake_case : List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_snake_case : Tuple = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=__lowercase ).raw )
if mlm_model:
_snake_case : Optional[Any] = "a bunch of [MASK] laying on a [MASK]."
else:
_snake_case : Optional[Any] = "How many cats are there?"
_snake_case : List[str] = processor(__lowercase , __lowercase , return_tensors="pt" )
_snake_case : Any = model(**__lowercase )
# Verify outputs
if mlm_model:
_snake_case : List[Any] = torch.Size([1, 11, 30_522] )
_snake_case : Union[str, Any] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1e-4 )
# verify masked token prediction equals "cats"
_snake_case : Optional[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_snake_case : Any = torch.Size([1, 3_129] )
_snake_case : Optional[int] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 )
# verify vqa prediction equals "2"
_snake_case : str = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_snake_case : Dict = torch.Size([1, 2] )
_snake_case : Optional[int] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 670 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> Dict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = list(__lowercase )
for i in range(len(__lowercase ) ):
_snake_case : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
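# A self-contained sketch of the release pattern above: null out the references,
# run a GC pass, then empty the accelerator cache. Reassigning from the returned
# list is what actually drops the caller's references. Plain CUDA guard so the
# sketch also runs on machines without an accelerator.
import gc

import torch


def release(*objects):
    objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return objects


big = [0] * 1_000_000
(big,) = release(big)  # reassign so the old list becomes collectable
print(big)  # None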
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__lowercase , __lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case (__lowercase = None , __lowercase = 128 ) -> Any:
'''simple docstring'''
if function is None:
return functools.partial(__lowercase , starting_batch_size=__lowercase )
_snake_case : List[str] = starting_batch_size
def decorator(*__lowercase , **__lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_snake_case : Optional[Any] = list(inspect.signature(__lowercase ).parameters.keys() )
# Guard against user error
if len(__lowercase ) < (len(__lowercase ) + 1):
_snake_case : str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called. """
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__lowercase , *__lowercase , **__lowercase )
except Exception as e:
if should_reduce_batch_size(__lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 670 | 1 |
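# A runnable miniature of the decorator defined above (find_executable_batch_size
# in un-obfuscated accelerate): the wrapped function receives the batch size as
# its first argument and is retried at half the size whenever an out-of-memory
# style error is raised. The OOM check below is a simulated stand-in so the
# sketch runs anywhere.
import functools


def auto_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(auto_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" in str(e):  # stand-in for should_reduce_batch_size
                    batch_size //= 2
                else:
                    raise

    return decorator


@auto_batch_size(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:  # pretend anything above 16 exhausts memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size


print(train())  # 16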
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : str = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 |
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
__SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()}
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Any = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def snake_case (__lowercase ) -> str:
'''simple docstring'''
if set(__lowercase ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
_snake_case : str = ""
for word in coded.split():
while len(__lowercase ) != 0:
decoded += decode_dict[word[:5]]
_snake_case : int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod() | 670 | 1 |
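# A tiny round trip through the cipher above: every letter becomes a 5-symbol
# A/B code and decoding walks the coded word five characters at a time. Restated
# with a two-letter dictionary since the helpers above share one placeholder name.
enc = {"h": "AABBB", "i": "ABAAA"}
dec = {v: k for k, v in enc.items()}

coded = "".join(enc[c] for c in "hi")
assert coded == "AABBBABAAA"
decoded = "".join(dec[coded[i : i + 5]] for i in range(0, len(coded), 5))
assert decoded == "hi"
print(coded, decoded)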
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__SCREAMING_SNAKE_CASE : int = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 1 |
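# A compact sketch of the data-parallel pattern the test above depends on:
# parameters are replicated across devices, the global batch is sharded so its
# leading axis equals the device count, and one PRNG key is split per device.
# Requires jax and flax; shapes are hypothetical.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {"w": jnp.ones((3,))}
batch = jnp.zeros((n * 2, 4))  # global batch, divisible by the device count

replicated = replicate(params)  # every leaf gains a leading device axis
sharded = shard(batch)          # shape becomes (n, 2, 4)
keys = jax.random.split(jax.random.PRNGKey(0), n)  # one key per device
print(sharded.shape, keys.shape)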
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase_ ( __snake_case , __snake_case ):
@register_to_config
def __init__( self , lowercase_ = 128 , lowercase_ = 256 , lowercase_ = 2_000.0 , lowercase_ = 768 , lowercase_ = 12 , lowercase_ = 12 , lowercase_ = 64 , lowercase_ = 2_048 , lowercase_ = 0.1 , ):
super().__init__()
_snake_case : Optional[int] = nn.Sequential(
nn.Linear(lowercase_ , d_model * 4 , bias=lowercase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase_ ) , nn.SiLU() , )
_snake_case : Optional[Any] = nn.Embedding(lowercase_ , lowercase_ )
_snake_case : List[Any] = False
_snake_case : Union[str, Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
_snake_case : List[Any] = nn.Dropout(p=lowercase_ )
_snake_case : int = nn.ModuleList()
for lyr_num in range(lowercase_ ):
# FiLM conditional T5 decoder
_snake_case : List[Any] = DecoderLayer(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ )
self.decoders.append(lowercase_ )
_snake_case : Any = TaLayerNorm(lowercase_ )
_snake_case : Optional[Any] = nn.Dropout(p=lowercase_ )
_snake_case : List[Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Union[str, Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
_snake_case ,_snake_case ,_snake_case : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case : str = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case : Union[str, Any] = self.conditioning_emb(lowercase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case : Any = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case : Any = torch.broadcast_to(
torch.arange(lowercase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case : Dict = self.position_encoding(lowercase_ )
_snake_case : Optional[int] = self.continuous_inputs_projection(lowercase_ )
inputs += position_encodings
_snake_case : Union[str, Any] = self.dropout(lowercase_ )
# decoder: No padding present.
_snake_case : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case : int = [(x, self.encoder_decoder_mask(lowercase_ , lowercase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case : Tuple = lyr(
lowercase_ , conditioning_emb=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )[0]
_snake_case : Optional[int] = self.decoder_norm(lowercase_ )
_snake_case : Optional[int] = self.post_dropout(lowercase_ )
_snake_case : Any = self.spec_out(lowercase_ )
return spec_out
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1e-6 ):
super().__init__()
_snake_case : Dict = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ ) )
def UpperCamelCase ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ):
_snake_case : Dict = self.layer[0](
lowercase_ , conditioning_emb=lowercase_ , attention_mask=lowercase_ , )
if encoder_hidden_states is not None:
_snake_case : List[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
_snake_case : int = self.layer[1](
lowercase_ , key_value_states=lowercase_ , attention_mask=lowercase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case : int = self.layer[-1](lowercase_ , lowercase_ )
return (hidden_states,)
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
super().__init__()
_snake_case : Dict = TaLayerNorm(lowercase_ )
_snake_case : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_ )
_snake_case : Optional[int] = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_ )
_snake_case : Any = nn.Dropout(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_=None , lowercase_=None , ):
# pre_self_attention_layer_norm
_snake_case : Dict = self.layer_norm(lowercase_ )
if conditioning_emb is not None:
_snake_case : Union[str, Any] = self.FiLMLayer(lowercase_ , lowercase_ )
# Self-attention block
_snake_case : List[str] = self.attention(lowercase_ )
_snake_case : Optional[int] = hidden_states + self.dropout(lowercase_ )
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
super().__init__()
_snake_case : List[Any] = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_ )
_snake_case : Dict = TaLayerNorm(lowercase_ , eps=lowercase_ )
_snake_case : Tuple = nn.Dropout(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_=None , lowercase_=None , ):
_snake_case : Dict = self.layer_norm(lowercase_ )
_snake_case : Union[str, Any] = self.attention(
lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case : Optional[Any] = hidden_states + self.dropout(lowercase_ )
return layer_output
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
super().__init__()
_snake_case : int = TaDenseGatedActDense(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ )
_snake_case : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_ )
_snake_case : List[Any] = TaLayerNorm(lowercase_ , eps=lowercase_ )
_snake_case : str = nn.Dropout(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_=None ):
_snake_case : str = self.layer_norm(lowercase_ )
if conditioning_emb is not None:
_snake_case : Union[str, Any] = self.film(lowercase_ , lowercase_ )
_snake_case : Any = self.DenseReluDense(lowercase_ )
_snake_case : List[Any] = hidden_states + self.dropout(lowercase_ )
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ ):
super().__init__()
_snake_case : Union[str, Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
_snake_case : Optional[int] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
_snake_case : Any = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
_snake_case : List[Any] = nn.Dropout(lowercase_ )
_snake_case : List[str] = NewGELUActivation()
def UpperCamelCase ( self , lowercase_ ):
_snake_case : int = self.act(self.wi_a(lowercase_ ) )  # gate branch (first d_model -> d_ff projection)
_snake_case : List[str] = self.wi_b(lowercase_ )  # parallel linear branch; a distinct projection from the gate's wi_a
_snake_case : int = hidden_gelu * hidden_linear
_snake_case : int = self.dropout(lowercase_ )
_snake_case : str = self.wo(lowercase_ )
return hidden_states
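# Illustrative sketch of the gated feed-forward above: two parallel
# d_model -> d_ff projections, a GELU-gated elementwise product, then a
# d_ff -> d_model projection. Self-contained, assuming plain PyTorch >= 1.12
# for the tanh-approximate GELU; the names here are ours.
import torch
from torch import nn

class GatedGeluFF(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float = 0.1):
        super().__init__()
        self.wi_gate = nn.Linear(d_model, d_ff, bias=False)  # gate branch
        self.wi_lin = nn.Linear(d_model, d_ff, bias=False)   # linear branch
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = nn.functional.gelu(self.wi_gate(x), approximate="tanh") * self.wi_lin(x)
        return self.wo(self.dropout(h))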
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_=1e-6 ):
super().__init__()
_snake_case : Union[str, Any] = nn.Parameter(torch.ones(lowercase_ ) )
_snake_case : List[str] = eps
def UpperCamelCase ( self , lowercase_ ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_snake_case : Any = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowercase_ )
_snake_case : Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_snake_case : Optional[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
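# Illustrative sketch of the RMS layer norm above (scale only, no mean
# subtraction, variance accumulated in fp32), written as a free function with
# our own names, assuming plain PyTorch.
import torch

def rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # variance is the mean of squares along the last dimension
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    normed = hidden_states * torch.rsqrt(variance + eps)
    return weight * normed.to(hidden_states.dtype)

# each row of the output has (approximately) unit root-mean-square
_x = torch.randn(2, 8)
assert torch.allclose(rms_norm(_x, torch.ones(8)).pow(2).mean(-1).sqrt(), torch.ones(2), atol=1e-3)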
class lowercase_ ( nn.Module ):
def UpperCamelCase ( self , lowercase_ ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowercase_ , 3.0 )) ))
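# The method above is the tanh approximation of GELU:
#   0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# Quick self-contained numeric check against PyTorch's exact erf-based GELU
# (the approximation error stays well under 2e-3 on this range):
import math
import torch

def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

_x = torch.linspace(-3.0, 3.0, steps=13)
assert torch.allclose(gelu_tanh(_x), torch.nn.functional.gelu(_x), atol=2e-3)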
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ ):
super().__init__()
_snake_case : int = nn.Linear(lowercase_ , out_features * 2 , bias=lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[str] = self.scale_bias(lowercase_ )
_snake_case ,_snake_case : Dict = torch.chunk(lowercase_ , 2 , -1 )
_snake_case : Optional[Any] = x * (1 + scale) + shift
return x | 670 | from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
__SCREAMING_SNAKE_CASE : str = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
__SCREAMING_SNAKE_CASE : Dict = OrderedDict(
[
# Model for Image classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
__SCREAMING_SNAKE_CASE : List[str] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
__SCREAMING_SNAKE_CASE : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
__SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
__SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
__SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
__SCREAMING_SNAKE_CASE : Dict = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
__SCREAMING_SNAKE_CASE : List[str] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
__SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
__SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__SCREAMING_SNAKE_CASE : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__SCREAMING_SNAKE_CASE : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__SCREAMING_SNAKE_CASE : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_MAPPING
__SCREAMING_SNAKE_CASE : int = auto_class_update(FlaxAutoModel)
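# Illustrative usage of the auto class above: `from_pretrained` reads the
# checkpoint's config and dispatches to the matching Flax class from the
# mapping (a BERT config resolves to FlaxBertModel, for example). Assumes
# transformers with Flax installed; the checkpoint name is only an example:
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")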
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__SCREAMING_SNAKE_CASE : Dict = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__SCREAMING_SNAKE_CASE : Optional[int] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE : Dict = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__SCREAMING_SNAKE_CASE : List[str] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE : Optional[int] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__SCREAMING_SNAKE_CASE : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__SCREAMING_SNAKE_CASE : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE : Optional[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__SCREAMING_SNAKE_CASE : Union[str, Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class lowercase_ ( _BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__SCREAMING_SNAKE_CASE : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
) | 670 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'linear'
_lowerCamelCase = 'cosine'
_lowerCamelCase = 'cosine_with_restarts'
_lowerCamelCase = 'polynomial'
_lowerCamelCase = 'constant'
_lowerCamelCase = 'constant_with_warmup'
_lowerCamelCase = 'piecewise_constant'
def snake_case (__lowercase , __lowercase = -1 ) -> List[Any]:
'''simple docstring'''
return LambdaLR(__lowercase , lambda __lowercase : 1 , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1.0 , __lowercase ) )
return 1.0
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = step_rules.split("," )
for rule_str in rule_list[:-1]:
_snake_case ,_snake_case : str = rule_str.split(":" )
_snake_case : Dict = int(__lowercase )
_snake_case : List[str] = float(__lowercase )
_snake_case : Tuple = value
_snake_case : str = float(rule_list[-1] )
def create_rules_function(__lowercase , __lowercase ):
def rule_func(__lowercase ) -> float:
_snake_case : List[str] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_snake_case : int = create_rules_function(__lowercase , __lowercase )
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
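# Illustrative sketch of the rule-string parsing above. The format is
# "multiplier:step" pairs plus a trailing final multiplier, e.g.
# "1:10,0.1:20,0.01" multiplies the base lr by 1 before step 10, by 0.1
# before step 20, and by 0.01 afterwards (this re-implementation is ours,
# assuming diffusers' documented rule format):
def piecewise_multiplier(step_rules: str, step: int) -> float:
    parts = step_rules.split(",")
    rules = {}
    for rule in parts[:-1]:
        value, boundary = rule.split(":")
        rules[int(boundary)] = float(value)
    for boundary in sorted(rules):
        if step < boundary:
            return rules[boundary]
    return float(parts[-1])

assert piecewise_multiplier("1:10,0.1:20,0.01", 5) == 1.0
assert piecewise_multiplier("1:10,0.1:20,0.01", 15) == 0.1
assert piecewise_multiplier("1:10,0.1:20,0.01", 99) == 0.01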
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=-1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 0.5 , __lowercase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowercase ) * 2.0 * progress )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 1 , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowercase ) * progress) % 1.0) )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=1e-7 , __lowercase=1.0 , __lowercase=-1 ) -> List[Any]:
'''simple docstring'''
_snake_case : List[Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_snake_case : Tuple = lr_init - lr_end
_snake_case : Any = num_training_steps - num_warmup_steps
_snake_case : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_snake_case : Optional[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowercase , __lowercase , __lowercase )
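# Worked example for the polynomial decay above, assuming lr_init=1e-3,
# lr_end=1e-7, power=1.0, no warmup and 100 training steps, at step 50:
#   lr_range      = 1e-3 - 1e-7
#   pct_remaining = 1 - 50 / 100 = 0.5
#   decay         = lr_range * 0.5**1.0 + 1e-7 ≈ 5.0e-4
#   multiplier    = decay / lr_init ≈ 0.5   (LambdaLR then yields lr ≈ 5.0e-4)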
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = 1 , __lowercase = 1.0 , __lowercase = -1 , ) -> List[Any]:
'''simple docstring'''
_snake_case : Any = SchedulerType(__lowercase )
_snake_case : Union[str, Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowercase , last_epoch=__lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowercase , step_rules=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowercase , num_warmup_steps=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , num_cycles=__lowercase , last_epoch=__lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , power=__lowercase , last_epoch=__lowercase , )
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , last_epoch=__lowercase ) | 670 | 1 |
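# Illustrative usage of the scheduler factory defined above, assuming the
# public diffusers API (`get_scheduler` in diffusers.optimization); the
# numbers are placeholders:
#
#   from diffusers.optimization import get_scheduler
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer=optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )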
import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
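# Illustrative demo of the three-way partition above; quick-select then
# recurses only into the side that can still contain the target index.
# Self-contained, with our own names:
def _three_way(data, pivot):
    less = [e for e in data if e < pivot]
    equal = [e for e in data if e == pivot]
    greater = [e for e in data if e > pivot]
    return less, equal, greater

assert _three_way([5, 1, 5, 9, 3], 5) == ([1, 3], [5, 5], [9])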
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(__lowercase ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'roc_bert'
def __init__( self , lowercase_=30_522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_="absolute" , lowercase_=None , lowercase_=True , lowercase_=True , lowercase_=768 , lowercase_=910 , lowercase_=512 , lowercase_=24_858 , lowercase_=True , **lowercase_ , ):
_snake_case : int = vocab_size
_snake_case : Union[str, Any] = max_position_embeddings
_snake_case : Union[str, Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Union[str, Any] = initializer_range
_snake_case : List[Any] = type_vocab_size
_snake_case : int = layer_norm_eps
_snake_case : Optional[Any] = use_cache
_snake_case : List[Any] = enable_pronunciation
_snake_case : Dict = enable_shape
_snake_case : Dict = pronunciation_embed_dim
_snake_case : Tuple = pronunciation_vocab_size
_snake_case : Tuple = shape_embed_dim
_snake_case : List[str] = shape_vocab_size
_snake_case : Dict = concat_input
_snake_case : int = position_embedding_type
_snake_case : int = classifier_dropout
super().__init__(pad_token_id=lowercase_ , **lowercase_ ) | 670 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
'''simple docstring'''
with open(__lowercase ) as metadata_file:
_snake_case : Any = json.load(__lowercase )
_snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=__lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
_snake_case : Optional[int] = torch.load(__lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
_snake_case : Optional[Any] = load_original_entity_vocab(__lowercase )
# add an entry for [MASK2]
_snake_case : List[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_snake_case : Optional[int] = AddedToken("<ent>" , lstrip=__lowercase , rstrip=__lowercase )
_snake_case : Optional[Any] = AddedToken("<ent2>" , lstrip=__lowercase , rstrip=__lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_b]} )  # the <ent> and <ent2> tokens defined above
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , "tokenizer_config.json" ) , "r" ) as f:
_snake_case : List[Any] = json.load(__lowercase )
_snake_case : Any = "MLukeTokenizer"
with open(os.path.join(__lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__lowercase , __lowercase )
_snake_case : Dict = MLukeTokenizer.from_pretrained(__lowercase )
# Initialize the embeddings of the special tokens
_snake_case : List[str] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(["#"] )[0]
_snake_case : Any = state_dict["embeddings.word_embeddings.weight"]
_snake_case : str = word_emb[ent_init_index].unsqueeze(0 )
_snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
_snake_case : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_snake_case : List[str] = state_dict[bias_name]
_snake_case : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
_snake_case : Union[str, Any] = decoder_bias[enta_init_index].unsqueeze(0 )
_snake_case : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_snake_case : List[str] = F"""encoder.layer.{layer_index}.attention.self."""
_snake_case : int = state_dict[prefix + matrix_name]
_snake_case : Dict = state_dict[prefix + matrix_name]
_snake_case : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_snake_case : int = state_dict["entity_embeddings.entity_embeddings.weight"]
_snake_case : List[Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_snake_case : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_snake_case : int = state_dict["entity_predictions.bias"]
_snake_case : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_snake_case : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_snake_case : int = LukeForMaskedLM(config=__lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_snake_case : Dict = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_snake_case : Optional[Any] = state_dict[key]
else:
_snake_case : Any = state_dict[key]
_snake_case ,_snake_case : Dict = model.load_state_dict(__lowercase , strict=__lowercase )
if set(__lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_snake_case : Any = MLukeTokenizer.from_pretrained(__lowercase , task="entity_classification" )
_snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_snake_case : Union[str, Any] = (0, 9)
_snake_case : List[str] = tokenizer(__lowercase , entity_spans=[span] , return_tensors="pt" )
_snake_case : Any = model(**__lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_snake_case : Union[str, Any] = torch.Size((1, 33, 768) )
_snake_case : List[str] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_snake_case : Union[str, Any] = torch.Size((1, 1, 768) )
_snake_case : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_snake_case : Optional[Any] = MLukeTokenizer.from_pretrained(__lowercase )
_snake_case : str = "Tokyo is the capital of <mask>."
_snake_case : Any = (24, 30)
_snake_case : Union[str, Any] = tokenizer(__lowercase , entity_spans=[span] , return_tensors="pt" )
_snake_case : Any = model(**__lowercase )
_snake_case : Union[str, Any] = encoding["input_ids"][0].tolist()
_snake_case : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_snake_case : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowercase )
_snake_case : List[Any] = outputs.entity_logits[0][0].argmax().item()
_snake_case : str = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__lowercase ) )
model.save_pretrained(__lowercase )
def snake_case (__lowercase ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[int] = ["[MASK]", "[PAD]", "[UNK]"]
_snake_case : int = [json.loads(__lowercase ) for line in open(__lowercase )]
_snake_case : str = {}
for entry in data:
_snake_case : Tuple = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_snake_case : Optional[int] = entity_id
break
_snake_case : Union[str, Any] = F"""{language}:{entity_name}"""
_snake_case : Dict = entity_id
return new_mapping
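# Illustrative sketch of the loader above: the entity vocab is JSON lines,
# each entry carrying an "id" and (name, language) pairs; keys become
# "language:name", except special tokens, which keep their bare name.
# Self-contained equivalent with our own names, assuming that layout:
import json

def load_entity_vocab_lines(lines):
    special = {"[MASK]", "[PAD]", "[UNK]"}
    mapping = {}
    for line in lines:
        entry = json.loads(line)
        for name, language in entry["entities"]:
            if name in special:
                mapping[name] = entry["id"]
                break
            mapping[f"{language}:{name}"] = entry["id"]
    return mapping

_demo = ['{"id": 3, "entities": [["Japan", "en"], ["Japon", "fr"]]}']
assert load_entity_vocab_lines(_demo) == {"en:Japan": 3, "fr:Japon": 3}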
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 670 | from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case (__lowercase ) -> Tuple:
'''simple docstring'''
_snake_case ,_snake_case : int = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowercase ):
for j in range(__lowercase ):
_snake_case : Optional[Any] = [255, 255, 255] - img[i][j]
return img
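# Illustrative alternative: with NumPy the per-pixel loops above reduce to a
# single vectorised expression (assumes an 8-bit image with values in 0..255):
import numpy as np

def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img

assert (convert_to_negative_fast(np.array([[0, 100, 255]], dtype=np.uint8)) == np.array([[255, 155, 0]])).all()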
if __name__ == "__main__":
# read original image
__SCREAMING_SNAKE_CASE : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
__SCREAMING_SNAKE_CASE : Tuple = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows() | 670 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__SCREAMING_SNAKE_CASE : Tuple = HUGGINGFACE_HUB_CACHE
__SCREAMING_SNAKE_CASE : Tuple = 'config.json'
__SCREAMING_SNAKE_CASE : str = 'diffusion_pytorch_model.bin'
__SCREAMING_SNAKE_CASE : Dict = 'diffusion_flax_model.msgpack'
__SCREAMING_SNAKE_CASE : int = 'model.onnx'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'diffusion_pytorch_model.safetensors'
__SCREAMING_SNAKE_CASE : List[Any] = 'weights.pb'
__SCREAMING_SNAKE_CASE : Dict = 'https://huggingface.co'
__SCREAMING_SNAKE_CASE : str = default_cache_path
__SCREAMING_SNAKE_CASE : Any = 'diffusers_modules'
__SCREAMING_SNAKE_CASE : Optional[Any] = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
__SCREAMING_SNAKE_CASE : Dict = ['fp16', 'non-ema']
__SCREAMING_SNAKE_CASE : List[str] = '.self_attn' | 670 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
_lowerCamelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_lowerCamelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_lowerCamelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_lowerCamelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_lowerCamelCase = None
# Templates used to generate this protein (prediction-only)
_lowerCamelCase = None
# Chain corresponding to each parent
_lowerCamelCase = None
def snake_case (__lowercase ) -> Protein:
'''simple docstring'''
_snake_case : str = r"(\[[A-Z]+\]\n)"
_snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
_snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
_snake_case : List[str] = ["N", "CA", "C"]
_snake_case : Any = None
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowercase ) ):
if seq[i] not in residue_constants.restypes:
_snake_case : Tuple = "X" # FIXME: strings are immutable
_snake_case : int = np.array(
[residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
_snake_case : Dict = np.array(__lowercase )
_snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
_snake_case : Any = np.zeros(
(
len(__lowercase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : Dict = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
if parents is None or len(__lowercase ) == 0:
_snake_case : Optional[int] = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[int] = pdb_str.split("\n" )
_snake_case : List[str] = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_snake_case : str = []
if prot.parents_chain_index is not None:
_snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowercase ) , [] )
parent_dict[str(__lowercase )].append(__lowercase )
_snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
parents_per_chain.append(__lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_snake_case : List[str] = [["N/A"]]
def make_parent_line(__lowercase ) -> str:
return F"""PARENT {' '.join(__lowercase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_snake_case : int = 0
for i, l in enumerate(__lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowercase ):
_snake_case : Tuple = parents_per_chain[chain_counter]
else:
_snake_case : str = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowercase ) )
return "\n".join(__lowercase )
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Optional[Any] = residue_constants.restypes + ["X"]
def res_atoa(__lowercase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
_snake_case : Optional[int] = residue_constants.atom_types
_snake_case : List[str] = []
_snake_case : Tuple = prot.atom_mask
_snake_case : List[str] = prot.aatype
_snake_case : int = prot.atom_positions
_snake_case : int = prot.residue_index.astype(np.intaa )
_snake_case : List[Any] = prot.b_factors
_snake_case : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
_snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
if len(__lowercase ) > 0:
pdb_lines.extend(__lowercase )
_snake_case : Optional[Any] = aatype.shape[0]
_snake_case : str = 1
_snake_case : Tuple = 0
_snake_case : int = string.ascii_uppercase
_snake_case : Optional[Any] = None
# Add all atom sites.
for i in range(__lowercase ):
_snake_case : Dict = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_snake_case : List[Any] = "ATOM"
_snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
_snake_case : str = ""
_snake_case : str = ""
_snake_case : Any = 1.00
_snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
_snake_case : Dict = ""
_snake_case : Any = "A"
if chain_index is not None:
_snake_case : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_snake_case : Optional[int] = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowercase )
atom_index += 1
_snake_case : Dict = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_snake_case : Optional[int] = True
_snake_case : Union[str, Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_snake_case : List[str] = "TER"
_snake_case : str = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowercase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowercase )
def snake_case (__lowercase ) -> np.ndarray:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Protein:
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowercase , remark=__lowercase , parents=__lowercase , parents_chain_index=__lowercase , ) | 670 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
_snake_case : Optional[Any] = self.image_processor
_snake_case : Tuple = -10
_snake_case : str = self.image_processor.size["longest_edge"]
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# pop arguments that are not used in the forward pass but are used nevertheless
_snake_case : Any = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
_snake_case : int = original_sizes.numpy()
_snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_snake_case : Dict = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : int = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
_snake_case : Any = np.array(lowercase_ )
if input_labels is not None:
_snake_case : Optional[Any] = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
_snake_case : Tuple = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : List[str] = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Tuple = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Dict = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.float32 ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ ) | 670 | 1 |
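The `_normalize_coordinates` step above rescales point and box coordinates from the original image resolution to the model's longest-edge-resized input. A minimal standalone sketch of that rescaling, assuming the same longest-edge rule as `_get_preprocess_shape` (the helper names here are illustrative, not the library API):

import numpy as np

def get_preprocess_shape(old_h, old_w, longest_edge):
    # Scale so the longer side equals `longest_edge`, keeping the aspect ratio.
    scale = longest_edge / max(old_h, old_w)
    return round(old_h * scale), round(old_w * scale)

def normalize_coords(coords, old_size, longest_edge):
    old_h, old_w = old_size
    new_h, new_w = get_preprocess_shape(old_h, old_w, longest_edge)
    out = np.asarray(coords, dtype=float).copy()
    out[..., 0] *= new_w / old_w  # x coordinates
    out[..., 1] *= new_h / old_h  # y coordinates
    return out

# A (200, 300) image resized to longest edge 1024: x scales by 1024/300.
print(normalize_coords([[150.0, 100.0]], (200, 300), 1024))  # -> [[512. 341.5]]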
import numpy
class lowercase_ :
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_snake_case : Optional[int] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_snake_case : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_snake_case : Optional[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
_snake_case : Any = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_snake_case : str = numpy.zeros(output_array.shape )
def UpperCamelCase ( self ):
_snake_case : List[str] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_snake_case : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_snake_case : Tuple = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCamelCase ( self ):
_snake_case : str = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_snake_case : Any = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_snake_case : Union[str, Any] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
for iteration in range(1 , iterations + 1 ):
_snake_case : Tuple = self.feedforward()
self.back_propagation()
if give_loss:
_snake_case : Optional[Any] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Dict = input_arr
_snake_case : int = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_snake_case : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_snake_case : Tuple = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def snake_case (__lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def snake_case (__lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
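Note that `sigmoid_derivative` above takes the activation value sigma(x), not x itself, relying on the identity sigma'(x) = sigma(x) * (1 - sigma(x)). A quick finite-difference check of that identity (step size and tolerance chosen for illustration):

import numpy

x = numpy.linspace(-4, 4, 9)
s = 1 / (1 + numpy.exp(-x))
analytic = s * (1 - s)  # what sigmoid_derivative(sigmoid(x)) computes
eps = 1e-6
numeric = (1 / (1 + numpy.exp(-(x + eps))) - 1 / (1 + numpy.exp(-(x - eps)))) / (2 * eps)
assert numpy.allclose(analytic, numeric, atol=1e-6)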
def snake_case () -> int:
'''simple docstring'''
_snake_case : Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.float64 , )
# True output values for the given inputs (the table is 3-bit XOR / odd parity).
_snake_case : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
# Calling neural network class.
_snake_case : Union[str, Any] = TwoHiddenLayerNeuralNetwork(
input_array=__lowercase , output_array=__lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__lowercase , iterations=10 , give_loss=__lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example() | 670 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_snake_case : Union[str, Any] = grid[0]
for row_n in range(1 , len(__lowercase ) ):
_snake_case : Union[str, Any] = grid[row_n]
_snake_case : List[Any] = fill_row(__lowercase , __lowercase )
_snake_case : List[Any] = grid[row_n]
return grid[-1][-1]
def snake_case (__lowercase , __lowercase ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
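The pair of functions in the row above computes the minimum top-left to bottom-right path sum (moving only right or down) by folding each row into the one above it. A standalone restatement of the same dynamic program, since the assignment names above are style-transform placeholders:

def min_path_sum(grid):
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    rows, cols = len(grid), len(grid[0])
    for c in range(1, cols):          # first row: only reachable from the left
        grid[0][c] += grid[0][c - 1]
    for r in range(1, rows):
        grid[r][0] += grid[r - 1][0]  # first column: only reachable from above
        for c in range(1, cols):
            grid[r][c] += min(grid[r][c - 1], grid[r - 1][c])
    return grid[-1][-1]

print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7, via 1 -> 3 -> 1 -> 1 -> 1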
import numpy as np
class lowercase_ :
def __init__( self ):
_snake_case : int = (0, 0)
_snake_case : Optional[int] = None
_snake_case : str = 0
_snake_case : List[Any] = 0
_snake_case : List[str] = 0
def __eq__( self , lowercase_ ):
return self.position == cell.position
def UpperCamelCase ( self ):
print(self.position )
class lowercase_ :
def __init__( self , lowercase_=(5, 5) ):
_snake_case : Any = np.zeros(lowercase_ )
_snake_case : Any = world_size[0]
_snake_case : int = world_size[1]
def UpperCamelCase ( self ):
print(self.w )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_snake_case : Tuple = cell.position[0]
_snake_case : int = cell.position[1]
_snake_case : Optional[Any] = []
for n in neughbour_cord:
_snake_case : int = current_x + n[0]
_snake_case : Any = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_snake_case : Dict = Cell()
_snake_case : Optional[Any] = (x, y)
_snake_case : str = cell
neighbours.append(lowercase_ )
return neighbours
def snake_case (__lowercase , __lowercase , __lowercase ) -> Optional[Any]:
'''simple docstring'''
_snake_case : Dict = []
_snake_case : List[str] = []
_open.append(__lowercase )
while _open:
_snake_case : Optional[int] = np.argmin([n.f for n in _open] )
_snake_case : Tuple = _open[min_f]
_closed.append(_open.pop(__lowercase ) )
if current == goal:
break
for n in world.get_neigbours(__lowercase ):
for c in _closed:
if c == n:
continue
_snake_case : List[Any] = current.g + 1
_snake_case ,_snake_case : List[Any] = n.position
_snake_case ,_snake_case : Optional[Any] = goal.position
_snake_case : List[str] = (goal.position[0] - n.position[0]) ** 2 + (goal.position[1] - n.position[1]) ** 2  # squared Euclidean distance to the goal
_snake_case : Tuple = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(__lowercase )
_snake_case : Any = []
while current.parent is not None:
path.append(current.position )
_snake_case : Any = current.parent
path.append(current.position )
return path[::-1]
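The heuristic above is the squared Euclidean distance to the goal. On a unit-cost grid, squared distance can exceed the true remaining step count (two cells away in a straight line already gives h = 4), so it is not admissible in the strict A* sense, but it still biases the search toward the goal. The f = g + h bookkeeping for one cell, worked by hand:

goal = (4, 4)
cell = (1, 2)
g = 3  # path cost so far: one unit per step
h = (goal[0] - cell[0]) ** 2 + (goal[1] - cell[1]) ** 2  # 9 + 4 = 13
print(g + h)  # f = 16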
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = Gridworld()
# Start position and goal
__SCREAMING_SNAKE_CASE : Optional[int] = Cell()
__SCREAMING_SNAKE_CASE : int = (0, 0)
__SCREAMING_SNAKE_CASE : Union[str, Any] = Cell()
__SCREAMING_SNAKE_CASE : Tuple = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
__SCREAMING_SNAKE_CASE : Tuple = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
print(world.w) | 670 | import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(__lowercase ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | 1 |
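`quick_select` in the row above returns the element that would land at position `index` in sorted order, in expected linear time, by recursing into only one side of a three-way partition. A standalone demo of the same idea (the function name is illustrative):

import random

def kth_smallest(items, k):
    if not 0 <= k < len(items):
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    less = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    if k < len(less):                   # answer lies in the smaller side
        return kth_smallest(less, k)
    if k < len(less) + len(equal):      # the pivot itself is the answer
        return pivot
    greater = [x for x in items if x > pivot]
    return kth_smallest(greater, k - len(less) - len(equal))

data = [7, 1, 5, 3, 9, 2]
print(kth_smallest(data, len(data) // 2))  # 5: index 3 of sorted [1, 2, 3, 5, 7, 9]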
def snake_case () -> List[str]:
'''simple docstring'''
_snake_case : Dict = 0
for i in range(1 , 1_001 ):
total += i**i
return str(total)[-10:]
if __name__ == "__main__":
print(solution()) | 670 | from math import pow, sqrt
def snake_case (*__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must be greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
) | 670 | 1 |
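Graham's law, as computed in the row above, states rate_1 / rate_2 = sqrt(M_2 / M_1). A worked example comparing helium and molecular oxygen (molar masses in g/mol):

from math import sqrt

molar_mass_he = 4.002602
molar_mass_o2 = 31.9988
ratio = sqrt(molar_mass_o2 / molar_mass_he)
print(round(ratio, 2))  # ~2.83: helium effuses almost three times faster than O2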
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'bart'
_lowerCamelCase = ['past_key_values']
_lowerCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowercase_=50_265 , lowercase_=1_024 , lowercase_=12 , lowercase_=4_096 , lowercase_=16 , lowercase_=12 , lowercase_=4_096 , lowercase_=16 , lowercase_=0.0 , lowercase_=0.0 , lowercase_="gelu" , lowercase_=1_024 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=0.0 , lowercase_=False , lowercase_=True , lowercase_=3 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_=True , lowercase_=2 , lowercase_=2 , **lowercase_ , ):
_snake_case : Tuple = vocab_size
_snake_case : List[Any] = max_position_embeddings
_snake_case : Optional[int] = d_model
_snake_case : Optional[int] = encoder_ffn_dim
_snake_case : str = encoder_layers
_snake_case : Dict = encoder_attention_heads
_snake_case : str = decoder_ffn_dim
_snake_case : Dict = decoder_layers
_snake_case : Optional[Any] = decoder_attention_heads
_snake_case : Any = dropout
_snake_case : Dict = attention_dropout
_snake_case : Union[str, Any] = activation_dropout
_snake_case : Dict = activation_function
_snake_case : str = init_std
_snake_case : Optional[int] = encoder_layerdrop
_snake_case : str = decoder_layerdrop
_snake_case : Any = classifier_dropout
_snake_case : List[str] = use_cache
_snake_case : List[str] = encoder_layers
_snake_case : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , lowercase_ ):
_snake_case : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"The config can simply be saved and uploaded again to be fixed." )
class lowercase_ ( __snake_case ):
@property
def UpperCamelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_snake_case : Optional[Any] = {0: "batch"}
_snake_case : Tuple = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_snake_case : List[Any] = {0: "batch", 1: "decoder_sequence"}
_snake_case : int = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case : List[str] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_snake_case ,_snake_case : Optional[Any] = self.num_layers
for i in range(lowercase_ ):
_snake_case : Any = {0: "batch", 2: "past_sequence + sequence"}
_snake_case : List[str] = {0: "batch", 2: "past_sequence + sequence"}
else:
_snake_case : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def UpperCamelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case : Union[str, Any] = super().outputs
else:
_snake_case : List[Any] = super(lowercase_ , self ).outputs
if self.use_past:
_snake_case ,_snake_case : int = self.num_layers
for i in range(lowercase_ ):
_snake_case : Any = {0: "batch", 2: "past_sequence + sequence"}
_snake_case : Tuple = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ):
_snake_case : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
_snake_case : Any = seq_length if not self.use_past else 1
_snake_case : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_snake_case : Dict = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_snake_case : Tuple = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_snake_case ,_snake_case : Optional[Any] = common_inputs["input_ids"].shape
_snake_case : Any = common_inputs["decoder_input_ids"].shape[1]
_snake_case ,_snake_case : str = self.num_attention_heads
_snake_case : Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case : Dict = decoder_seq_length + 3
_snake_case : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case : Any = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
_snake_case : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case ,_snake_case : List[str] = self.num_layers
_snake_case : List[str] = min(lowercase_ , lowercase_ )
_snake_case : Optional[Any] = max(lowercase_ , lowercase_ ) - min_num_layers
_snake_case : Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
_snake_case : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
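# The dummy `past_key_values` assembled above are zero tensors shaped
# (batch, num_heads, past_sequence_length, head_dim), one key/value pair per
# layer. Illustrative numbers (not tied to any real checkpoint): with
# hidden_size = 1024 and 16 attention heads, head_dim = 1024 // 16 = 64, so
# torch.zeros(2, 16, 10, 64) stands in for a length-10 cache at batch size 2.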
def UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ):
_snake_case : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_snake_case ,_snake_case : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_snake_case : List[str] = seqlen + 2
_snake_case ,_snake_case : Union[str, Any] = self.num_layers
_snake_case ,_snake_case : str = self.num_attention_heads
_snake_case : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case : Optional[Any] = common_inputs["attention_mask"].dtype
_snake_case : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
_snake_case : str = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_snake_case : int = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case : Dict = tokenizer.num_special_tokens_to_add(lowercase_ )
_snake_case : Any = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
_snake_case : List[str] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case : int = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
elif self.task == "causal-lm":
_snake_case : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
_snake_case : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case : str = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
_snake_case : int = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ ) | 670 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ ) | 670 | 1 |
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : Tuple = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4)) | 670 | from __future__ import annotations
from typing import TypedDict
class lowercase_ ( __snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def snake_case (__lowercase ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case : List[str] = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case : Union[str, Any] = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string must be an int or"
" castable to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | 1 |
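The row above builds the Burrows-Wheeler transform by sorting all rotations of s and keeping the last column plus the index of the original string; inversion repeatedly prepends that column and re-sorts. A tiny standalone run of the forward transform (the function name is illustrative):

def bwt(s):
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(rotation[-1] for rotation in rotations), rotations.index(s)

print(bwt("banana"))  # ('nnbaaa', 3): equal characters cluster, which helps run-length coding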
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'visual_bert'
def __init__( self , lowercase_=30_522 , lowercase_=768 , lowercase_=512 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=False , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
_snake_case : Tuple = vocab_size
_snake_case : Tuple = max_position_embeddings
_snake_case : Dict = hidden_size
_snake_case : int = visual_embedding_dim
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : str = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : Union[str, Any] = initializer_range
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[str] = layer_norm_eps
_snake_case : List[Any] = bypass_transformer
_snake_case : Optional[int] = special_visual_initialize | 670 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = True , lowercase_ = "arrow" , **lowercase_ , ):
super().__init__(
split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , **lowercase_ , )
_snake_case : Union[str, Any] = load_from_cache_file
_snake_case : str = file_format
_snake_case : int = Spark(
df=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , working_dir=lowercase_ , **lowercase_ , )
def UpperCamelCase ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_snake_case : Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split ) | 670 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
_snake_case : Optional[int] = parent
_snake_case : str = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Optional[Any] = use_labels
_snake_case : Tuple = vocab_size
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Union[str, Any] = eos_token_id
_snake_case : str = pad_token_id
_snake_case : Any = bos_token_id
_snake_case : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase ( self ):
_snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
_snake_case : int = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
_snake_case : List[Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
_snake_case : Optional[Any] = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : int = inputs_dict["attention_mask"][:1, :]
_snake_case : int = 1
# first forward pass
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_snake_case : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFLEDModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# TODO: Head-masking not yet implement
pass
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
return tf.constant(__lowercase , dtype=tf.int32 )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | 1 |
__SCREAMING_SNAKE_CASE : str = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_snake_case : Union[str, Any] = [False] * len(__lowercase )
_snake_case : Any = [s]
_snake_case : List[Any] = True
while queue:
_snake_case : Optional[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_snake_case : Dict = True
_snake_case : Dict = u
return visited[t]
def snake_case (__lowercase , __lowercase , __lowercase ) -> int:
'''simple docstring'''
_snake_case : Union[str, Any] = [-1] * (len(__lowercase ))
_snake_case : Any = 0
_snake_case : Any = []
_snake_case : List[Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_snake_case : Dict = float("Inf" )
_snake_case : Any = sink
while s != source:
# Find the minimum value in select path
_snake_case : Union[str, Any] = min(__lowercase , graph[parent[s]][s] )
_snake_case : Union[str, Any] = parent[s]
max_flow += path_flow
_snake_case : List[Any] = sink
while v != source:
_snake_case : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case : List[str] = parent[v]
for i in range(len(__lowercase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
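# The loop above is Edmonds-Karp: BFS finds an augmenting path, the bottleneck
# residual capacity is pushed along it, and edges left with zero residual but
# positive original capacity are reported as the cut. Below, a compact
# standalone sketch of the same flow computation, checked on a small graph
# whose min cut is easy to read off (total capacity out of the source is 5).
from copy import deepcopy

def max_flow(capacity, source, sink):
    graph, flow = deepcopy(capacity), 0
    while True:
        parent, queue = {source: None}, [source]
        while queue and sink not in parent:  # BFS for an augmenting path
            u = queue.pop(0)
            for v, cap in enumerate(graph[u]):
                if cap > 0 and v not in parent:
                    parent[v] = u
                    queue.append(v)
        if sink not in parent:
            return flow
        bottleneck, v = float("inf"), sink  # smallest residual along the path
        while parent[v] is not None:
            bottleneck = min(bottleneck, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while parent[v] is not None:  # push flow, update residual capacities
            graph[parent[v]][v] -= bottleneck
            graph[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

print(max_flow([[0, 3, 2, 0], [0, 0, 1, 2], [0, 0, 0, 3], [0, 0, 0, 0]], 0, 3))  # 5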
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 670 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 1 |
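A minimal usage sketch mirroring the slow integration tests above; the checkpoint name and the expected ids for "Hello World!" come straight from those tests (running this downloads the real model files):

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer.encode("Hello World!")
print(ids)                  # [126, 32, 262, 152, 38, 72, 287], per the test above
print(tokenizer.decode(ids))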
from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : int = Rectangle(height=0.5 , width=0.5 )
_snake_case : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : Optional[Any] = [mem.copy() for i in range(6 )]
_snake_case : Optional[int] = [mem.copy() for i in range(6 )]
_snake_case : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : List[str] = Text("CPU" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : Optional[int] = [mem.copy() for i in range(1 )]
_snake_case : List[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : List[Any] = Text("GPU" , font_size=24 )
_snake_case : List[Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.align_to(lowercase_ , lowercase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Optional[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("Model" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) , )
_snake_case : Any = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
_snake_case : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Union[str, Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ , run_time=2.5 ) , Write(lowercase_ ) , Write(lowercase_ ) )
self.add(lowercase_ )
_snake_case : Tuple = []
_snake_case : Optional[int] = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
cpu_target.move_to(lowercase_ )
cpu_target.generate_target()
_snake_case : int = 0.46 / 4
_snake_case : List[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase_ , buff=0.0 )
cpu_targs.append(lowercase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowercase_ ) )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | 1 |
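# A hedged usage sketch of the processor class under test. The checkpoint name
# ("CIDAS/clipseg-rd64-refined") is an assumption, a standard public CLIPSeg
# checkpoint, and is not taken from the tests themselves.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

clipseg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
demo_image = Image.fromarray(np.random.randint(255, size=(30, 40, 3), dtype=np.uint8))
inputs = clipseg_processor(text=["a cat"], images=[demo_image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']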
import math
def snake_case (__lowercase , __lowercase ) -> float:
'''simple docstring'''
return math.pow(__lowercase , 2 ) - a
def snake_case (__lowercase ) -> float:
'''simple docstring'''
return 2 * x
def snake_case (__lowercase ) -> float:
'''simple docstring'''
_snake_case : Optional[Any] = 2.0
while start <= a:
_snake_case : Union[str, Any] = math.pow(__lowercase , 2 )
return start
def snake_case (__lowercase , __lowercase = 9_999 , __lowercase = 0.00000000000001 ) -> float:
'''simple docstring'''
if a < 0:
raise ValueError("math domain error" )
_snake_case : Optional[Any] = get_initial_point(__lowercase )
for _ in range(__lowercase ):
_snake_case : List[Any] = value
_snake_case : Optional[Any] = value - fx(__lowercase , __lowercase ) / fx_derivative(__lowercase )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod() | 670 | from argparse import ArgumentParser
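# A de-obfuscated sketch of the square-root routine above; the names are
# assumptions, the logic mirrors the snippet: Newton's method on
# f(x) = x**2 - a, i.e. x_next = x - (x * x - a) / (2 * x).
def newton_sqrt(a: float, max_iter: int = 9_999, tol: float = 1e-14) -> float:
    if a < 0:
        raise ValueError("math domain error")
    x = 2.0
    while x <= a:  # repeated squaring reaches a start point >= sqrt(a)
        x = x * x
    for _ in range(max_iter):
        prev = x
        x = x - (x * x - a) / (2 * x)  # one Newton step
        if abs(prev - x) < tol:
            break
    return x

print(newton_sqrt(16.0))  # ~4.0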
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_snake_case : Optional[int] = PipelineDataFormat.from_str(
format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__lowercase , __lowercase )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : str = nlp
_snake_case : str = reader
@staticmethod
def UpperCamelCase ( lowercase_ ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = self._nlp, []
for entry in self._reader:
_snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : str = self._reader.save_binary(lowercase_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowercase_ ) | 670 | 1 |
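# A hedged sketch of how the command defined above is invoked from the shell;
# the flag names come from the add_argument calls, while the task and file
# names are made up for illustration:
#
#   transformers-cli run --task text-classification \
#       --input reviews.csv --format csv --column text \
#       --output predictions.csv --device -1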
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'M-CLIP'
def __init__( self , lowercase_=1_024 , lowercase_=768 , **lowercase_ ):
_snake_case : Union[str, Any] = transformerDimSize
_snake_case : Optional[Any] = imageDimSize
super().__init__(**lowercase_ )
class lowercase_ ( __snake_case ):
_lowerCamelCase = MCLIPConfig
def __init__( self , lowercase_ , *lowercase_ , **lowercase_ ):
super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
_snake_case : Any = XLMRobertaModel(lowercase_ )
_snake_case : List[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : int = self.transformer(input_ids=lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : Optional[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowercase_ ), embs | 670 | import os
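# A standalone sketch of the masked mean pooling used in the forward() above:
# padding positions are zeroed out via the attention mask before averaging
# over the sequence dimension. Shapes here are illustrative.
import torch

embs = torch.randn(2, 4, 8)                        # (batch, seq_len, hidden)
mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 0 marks padding
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8])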
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : List[str] = nn.ModuleList(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
_snake_case ,_snake_case : Optional[int] = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
_snake_case ,_snake_case : Tuple = down_samples, mid_sample
else:
_snake_case : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
_snake_case : Tuple = 0
_snake_case : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
_snake_case : int = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
_snake_case : List[str] = 0
_snake_case : Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case : Optional[Any] = pretrained_model_path
while os.path.isdir(lowercase_ ):
_snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
_snake_case : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(lowercase_ ) | 670 | 1 |
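# A hedged usage sketch of the class above, assuming it is diffusers'
# MultiControlNetModel; the two checkpoint names are well-known public
# ControlNets, not something this file prescribes.
from diffusers import ControlNetModel
from diffusers.pipelines.controlnet import MultiControlNetModel

controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
]
multi = MultiControlNetModel(controlnets)
# save_pretrained() writes the first net to the given directory and the rest to
# sibling directories suffixed _1, _2, ... as the comments above describe.
multi.save_pretrained("./mydirectory/controlnet")
restored = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")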
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__SCREAMING_SNAKE_CASE : str = 'Create a default config file for Accelerate with only a few flags set.'
def snake_case (__lowercase="no" , __lowercase = default_json_config_file , __lowercase = False ) -> Union[str, Any]:
'''simple docstring'''
_snake_case : Any = Path(__lowercase )
path.parent.mkdir(parents=__lowercase , exist_ok=__lowercase )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_snake_case : Union[str, Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_snake_case : Any = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
_snake_case : int = torch.cuda.device_count()
_snake_case : Any = num_gpus
_snake_case : Any = False
if num_gpus > 1:
_snake_case : Union[str, Any] = "MULTI_GPU"
else:
_snake_case : Tuple = "NO"
elif is_xpu_available() and use_xpu:
_snake_case : str = torch.xpu.device_count()
_snake_case : Tuple = num_xpus
_snake_case : str = False
if num_xpus > 1:
_snake_case : int = "MULTI_XPU"
else:
_snake_case : Dict = "NO"
elif is_npu_available():
_snake_case : Union[str, Any] = torch.npu.device_count()
_snake_case : Tuple = num_npus
_snake_case : Tuple = False
if num_npus > 1:
_snake_case : Union[str, Any] = "MULTI_NPU"
else:
_snake_case : int = "NO"
else:
_snake_case : Dict = 0
_snake_case : Any = True
_snake_case : Optional[Any] = 1
_snake_case : Optional[Any] = "NO"
_snake_case : int = ClusterConfig(**__lowercase )
config.to_json_file(__lowercase )
return path
def snake_case (__lowercase , __lowercase ) -> Dict:
'''simple docstring'''
_snake_case : Any = parser.add_parser("default" , parents=__lowercase , help=__lowercase , formatter_class=__lowercase )
parser.add_argument(
"--config_file" , default=__lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=__lowercase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=__lowercase )
return parser
def snake_case (__lowercase ) -> Dict:
'''simple docstring'''
_snake_case : Any = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" ) | 670 | import warnings
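# A minimal usage sketch; this helper is exported as
# accelerate.utils.write_basic_config, so the import below should match:
from accelerate.utils import write_basic_config

write_basic_config(mixed_precision="fp16")  # probes CUDA/XPU/NPU and writes the config file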
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'CLIPImageProcessor'
_lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : Dict = kwargs.pop("feature_extractor" )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
_snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | 1 |
import re
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : List[str] = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
if match := re.search(__lowercase , __lowercase ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895')) | 670 | from __future__ import annotations
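# A few extra checks consistent with the regex above, restated standalone so
# they run on their own (the name indian_phone_validator comes from the
# __main__ block):
import re

def indian_phone_validator(phone: str) -> bool:
    return re.search(re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$"), phone) is not None

print(indian_phone_validator("+91-9876543210"))  # True  ("+91" plus a "-" separator)
print(indian_phone_validator("9876543210"))      # True  (bare 10-digit number)
print(indian_phone_validator("1234567890"))      # False (must start with 7, 8 or 9)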
def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
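# A de-obfuscated sketch of the helper above (the name ohms_law is an
# assumption): pass 0 for exactly the one quantity to be solved for.
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}

print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
print(ohms_law(voltage=0, current=5, resistance=2))   # {'voltage': 10.0}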
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : List[str] = "laion/clap-htsat-unfused"
_snake_case : Tuple = tempfile.mkdtemp()
def UpperCamelCase ( self , **lowercase_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : str = self.get_tokenizer()
_snake_case : Optional[Any] = self.get_feature_extractor()
_snake_case : List[str] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[str] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[str] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : List[Any] = self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : str = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
_snake_case : int = floats_list((3, 1_000) )
_snake_case : Tuple = feature_extractor(lowercase_ , return_tensors="np" )
_snake_case : List[Any] = processor(audios=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_feature_extractor()
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
_snake_case : Any = "This is a test string"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_feature_extractor()
_snake_case : Dict = self.get_tokenizer()
_snake_case : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
_snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Optional[int] = processor.batch_decode(lowercase_ )
_snake_case : List[str] = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_feature_extractor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) | 670 | import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> Dict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = list(__lowercase )
for i in range(len(__lowercase ) ):
_snake_case : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__lowercase , __lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case (__lowercase = None , __lowercase = 128 ) -> Any:
'''simple docstring'''
if function is None:
return functools.partial(__lowercase , starting_batch_size=__lowercase )
_snake_case : List[str] = starting_batch_size
def decorator(*__lowercase , **__lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_snake_case : Optional[Any] = list(inspect.signature(__lowercase ).parameters.keys() )
# Guard against user error
if len(__lowercase ) < (len(__lowercase ) + 1):
_snake_case : str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__lowercase , *__lowercase , **__lowercase )
except Exception as e:
if should_reduce_batch_size(__lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 670 | 1 |
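# A usage sketch of the decorator above; it is exported as
# accelerate.utils.find_executable_batch_size. On a CUDA, cuDNN, or CPU
# out-of-memory error the wrapped function is retried with the batch size
# halved, until it succeeds or reaches zero.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is injected by the decorator; do not pass it yourself.
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders and run the training loop with `batch_size` ...

train()  # note: called with no batch_size argument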
from __future__ import annotations
from typing import TypedDict
class lowercase_ ( __snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def snake_case (__lowercase ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case : List[str] = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case : Union[str, Any] = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | __SCREAMING_SNAKE_CASE : Union[str, Any] = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
__SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()}
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Any = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def snake_case (__lowercase ) -> str:
'''simple docstring'''
if set(__lowercase ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
_snake_case : str = ""
for word in coded.split():
while len(__lowercase ) != 0:
decoded += decode_dict[word[:5]]
_snake_case : int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod() | 670 | 1 |
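# A usage sketch for the cipher above, assuming the de-obfuscated module where
# the two functions keep the names encode()/decode() used in their own error
# messages. encode() concatenates 5-letter groups within a word; decode()
# splits words on spaces:
print(encode("hello world"))
# -> AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
print(decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB"))
# -> hello world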
def snake_case (__lowercase ) -> list:
'''simple docstring'''
_snake_case : Dict = len(__lowercase )
for _ in range(__lowercase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_snake_case ,_snake_case : Dict = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = list(range(1_0, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''') | 670 | import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 1 |
import cva
import numpy as np
class lowercase_ :
def __init__( self , lowercase_ , lowercase_ ):
if k in (0.04, 0.06):
_snake_case : List[Any] = k
_snake_case : int = window_size
else:
raise ValueError("invalid k value" )
def __str__( self ):
return str(self.k )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = cva.imread(lowercase_ , 0 )
_snake_case ,_snake_case : str = img.shape
_snake_case : list[list[int]] = []
_snake_case : int = img.copy()
_snake_case : int = cva.cvtColor(lowercase_ , cva.COLOR_GRAY2RGB )
_snake_case ,_snake_case : List[Any] = np.gradient(lowercase_ )
_snake_case : List[str] = dx**2
_snake_case : List[str] = dy**2
_snake_case : str = dx * dy
_snake_case : List[str] = 0.04
_snake_case : List[str] = self.window_size // 2
for y in range(lowercase_ , h - offset ):
for x in range(lowercase_ , w - offset ):
_snake_case : List[str] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_snake_case : Tuple = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_snake_case : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_snake_case : Any = (wxx * wyy) - (wxy**2)
_snake_case : Dict = wxx + wyy
_snake_case : str = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = HarrisCorner(0.04, 3)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE : Any = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 670 | from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE : Tuple = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = LEDTokenizer
_lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="replace" , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=False , lowercase_=True , **lowercase_ , ):
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , )
_snake_case : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
_snake_case : Tuple = getattr(lowercase_ , pre_tok_state.pop("type" ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = pre_tok_class(**lowercase_ )
_snake_case : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_snake_case : List[Any] = "post_processor"
_snake_case : Optional[Any] = getattr(self.backend_tokenizer , lowercase_ , lowercase_ )
if tokenizer_component_instance:
_snake_case : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : str = tuple(state["sep"] )
if "cls" in state:
_snake_case : Any = tuple(state["cls"] )
_snake_case : Optional[int] = False
if state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = True
if state.get("trim_offsets" , lowercase_ ) != trim_offsets:
_snake_case : str = trim_offsets
_snake_case : Optional[int] = True
if changes_to_apply:
_snake_case : int = getattr(lowercase_ , state.pop("type" ) )
_snake_case : int = component_class(**lowercase_ )
setattr(self.backend_tokenizer , lowercase_ , lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self , lowercase_ ):
_snake_case : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else value
_snake_case : Optional[int] = value
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
_snake_case : Optional[int] = kwargs.get("is_split_into_words" , lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
_snake_case : Any = kwargs.get("is_split_into_words" , lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : Tuple = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_=None ):
_snake_case : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
_snake_case : Dict = super()._pad(
encoded_inputs=lowercase_ , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
# Load from model defaults
if return_attention_mask is None:
_snake_case : Optional[int] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_snake_case : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_snake_case : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(lowercase_ )
if needs_to_be_padded:
_snake_case : Union[str, Any] = len(lowercase_ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_snake_case : Union[str, Any] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_snake_case : Optional[int] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs | 670 | import math
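# A hedged usage sketch of the tokenizer above, showing why _pad() is
# overridden: a user-supplied global_attention_mask is padded with -1
# alongside the other inputs.
from transformers import LEDTokenizerFast

led_tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = led_tok(["short", "a slightly longer sentence"])
# global attention on the first token of each sequence, a common LED choice:
batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]
padded = led_tok.pad(batch, padding=True, return_tensors="pt")
print(padded["global_attention_mask"])  # shorter rows end in -1, per _pad() above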
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'linear'
_lowerCamelCase = 'cosine'
_lowerCamelCase = 'cosine_with_restarts'
_lowerCamelCase = 'polynomial'
_lowerCamelCase = 'constant'
_lowerCamelCase = 'constant_with_warmup'
_lowerCamelCase = 'piecewise_constant'
def snake_case (__lowercase , __lowercase = -1 ) -> List[Any]:
'''simple docstring'''
return LambdaLR(__lowercase , lambda __lowercase : 1 , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1.0 , __lowercase ) )
return 1.0
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = step_rules.split("," )
for rule_str in rule_list[:-1]:
_snake_case ,_snake_case : str = rule_str.split(":" )
_snake_case : Dict = int(__lowercase )
_snake_case : List[str] = float(__lowercase )
_snake_case : Tuple = value
_snake_case : str = float(rule_list[-1] )
def create_rules_function(__lowercase , __lowercase ):
def rule_func(__lowercase ) -> float:
_snake_case : List[str] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_snake_case : int = create_rules_function(__lowercase , __lowercase )
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=-1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 0.5 , __lowercase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowercase ) * 2.0 * progress )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 1 , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowercase ) * progress) % 1.0) )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=1e-7 , __lowercase=1.0 , __lowercase=-1 ) -> List[Any]:
'''simple docstring'''
_snake_case : List[Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_snake_case : Tuple = lr_init - lr_end
_snake_case : Any = num_training_steps - num_warmup_steps
_snake_case : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_snake_case : Optional[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowercase , __lowercase , __lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = 1 , __lowercase = 1.0 , __lowercase = -1 , ) -> List[Any]:
'''simple docstring'''
_snake_case : Any = SchedulerType(__lowercase )
_snake_case : Union[str, Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowercase , last_epoch=__lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowercase , step_rules=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowercase , num_warmup_steps=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , num_cycles=__lowercase , last_epoch=__lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , power=__lowercase , last_epoch=__lowercase , )
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , last_epoch=__lowercase ) | 670 | 1 |
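The row above walks through warmup-then-decay learning-rate lambdas. A minimal self-contained sketch of the cosine variant, assuming only that `torch` is installed (the model, base lr, and step counts are placeholders):

```python
import math

import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)                      # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(current_step: int) -> float:
    # Linear warmup, then a half-cosine decay to zero (num_cycles = 0.5).
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    progress = (current_step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(num_training_steps):
    optimizer.step()
    scheduler.step()                               # scales the base lr by lr_lambda(step)
```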
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'facebook/blenderbot_small-90M': 5_1_2,
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = BlenderbotSmallTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_=False , lowercase_=True , **lowercase_ , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ , merges=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , ) , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , **lowercase_ , )
_snake_case : int = add_prefix_space
def UpperCamelCase ( self , lowercase_ , lowercase_=None ):
_snake_case : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : Optional[int] = [self.sep_token_id]
_snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 670 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'roc_bert'
def __init__( self , lowercase_=30_522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_="absolute" , lowercase_=None , lowercase_=True , lowercase_=True , lowercase_=768 , lowercase_=910 , lowercase_=512 , lowercase_=24_858 , lowercase_=True , **lowercase_ , ):
_snake_case : int = vocab_size
_snake_case : Union[str, Any] = max_position_embeddings
_snake_case : Union[str, Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Union[str, Any] = initializer_range
_snake_case : List[Any] = type_vocab_size
_snake_case : int = layer_norm_eps
_snake_case : Optional[Any] = use_cache
_snake_case : List[Any] = enable_pronunciation
_snake_case : Dict = enable_shape
_snake_case : Dict = pronunciation_embed_dim
_snake_case : Tuple = pronunciation_vocab_size
_snake_case : Tuple = shape_embed_dim
_snake_case : List[str] = shape_vocab_size
_snake_case : Dict = concat_input
_snake_case : int = position_embedding_type
_snake_case : int = classifier_dropout
super().__init__(pad_token_id=lowercase_ , **lowercase_ ) | 670 | 1 |
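The config class above is pure data plus the JSON (de)serialization inherited from `PretrainedConfig`. A short hedged usage sketch, assuming the `transformers` package is installed and that `RoCBertConfig` is the public name of this obfuscated class (the `'roc_bert'` model type suggests it):

```python
from transformers import RoCBertConfig  # assumed public counterpart of the class above

config = RoCBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
config.save_pretrained("./roc_bert_tiny")              # writes config.json to disk
reloaded = RoCBertConfig.from_pretrained("./roc_bert_tiny")
assert reloaded.hidden_size == 256                     # round-trips through JSON
```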
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'mctct'
def __init__( self , lowercase_=8_065 , lowercase_=1_536 , lowercase_=36 , lowercase_=6_144 , lowercase_=4 , lowercase_=384 , lowercase_=920 , lowercase_=1e-5 , lowercase_=0.3 , lowercase_="relu" , lowercase_=0.02 , lowercase_=0.3 , lowercase_=0.3 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_=1 , lowercase_=0.3 , lowercase_=1 , lowercase_=(7,) , lowercase_=(3,) , lowercase_=80 , lowercase_=1 , lowercase_=None , lowercase_="sum" , lowercase_=False , **lowercase_ , ):
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
_snake_case : Tuple = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : str = intermediate_size
_snake_case : str = num_attention_heads
_snake_case : Optional[Any] = attention_head_dim
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : int = layerdrop
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = initializer_range
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : str = pad_token_id
_snake_case : int = bos_token_id
_snake_case : Tuple = eos_token_id
_snake_case : Any = conv_glu_dim
_snake_case : Any = conv_dropout
_snake_case : Optional[int] = num_conv_layers
_snake_case : List[str] = input_feat_per_channel
_snake_case : Tuple = input_channels
_snake_case : int = conv_channels
_snake_case : Tuple = ctc_loss_reduction
_snake_case : Dict = ctc_zero_infinity
# prevents config testing fail with exporting to json
_snake_case : Union[str, Any] = list(lowercase_ )
_snake_case : Any = list(lowercase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
f"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) | 670 | from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case (__lowercase ) -> Tuple:
'''simple docstring'''
_snake_case ,_snake_case : int = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowercase ):
for j in range(__lowercase ):
_snake_case : Optional[Any] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__SCREAMING_SNAKE_CASE : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
__SCREAMING_SNAKE_CASE : Tuple = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows() | 670 | 1 |
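The negative-image row above inverts pixels one at a time in a double loop. NumPy broadcasting performs the same transform in a single expression; a hedged sketch for 8-bit images:

```python
import numpy as np

def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
    # For uint8 images, 255 - pixel inverts every channel at once.
    return 255 - img

# Example on a dummy 2x2 RGB image:
img = np.array([[[0, 128, 255], [10, 20, 30]],
                [[255, 255, 255], [0, 0, 0]]], dtype=np.uint8)
print(convert_to_negative_vectorized(img)[0, 0])  # [255 127   0]
```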
from ...processing_utils import ProcessorMixin
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'feature_extractor']
_lowerCamelCase = 'TvltImageProcessor'
_lowerCamelCase = 'TvltFeatureExtractor'
def __init__( self , lowercase_ , lowercase_ ):
super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_ )
_snake_case : List[str] = image_processor
_snake_case : Dict = feature_extractor
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=False , *lowercase_ , **lowercase_ , ):
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
_snake_case : Tuple = None
if images is not None:
_snake_case : Any = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_ )
if images_mixed is not None:
_snake_case : Optional[Any] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_ )
if audio is not None:
_snake_case : List[Any] = self.feature_extractor(
lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_ )
_snake_case : int = {}
if audio is not None:
output_dict.update(lowercase_ )
if images is not None:
output_dict.update(lowercase_ )
if images_mixed_dict is not None:
output_dict.update(lowercase_ )
return output_dict
@property
def UpperCamelCase ( self ):
_snake_case : str = self.image_processor.model_input_names
_snake_case : Union[str, Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) ) | 670 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
_lowerCamelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_lowerCamelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_lowerCamelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_lowerCamelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_lowerCamelCase = None
# Templates used to generate this protein (prediction-only)
_lowerCamelCase = None
# Chain corresponding to each parent
_lowerCamelCase = None
def snake_case (__lowercase ) -> Protein:
'''simple docstring'''
_snake_case : str = r"(\[[A-Z]+\]\n)"
_snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
_snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
_snake_case : List[str] = ["N", "CA", "C"]
_snake_case : Any = None
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowercase ) ):
if seq[i] not in residue_constants.restypes:
_snake_case : Tuple = "X" # FIXME: strings are immutable
_snake_case : int = np.array(
[residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
_snake_case : Dict = np.array(__lowercase )
_snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
_snake_case : Any = np.zeros(
(
len(__lowercase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : Dict = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
if parents is None or len(__lowercase ) == 0:
_snake_case : Optional[int] = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[int] = pdb_str.split("\n" )
_snake_case : List[str] = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_snake_case : str = []
if prot.parents_chain_index is not None:
_snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowercase ) , [] )
parent_dict[str(__lowercase )].append(__lowercase )
_snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
parents_per_chain.append(__lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_snake_case : List[str] = [["N/A"]]
def make_parent_line(__lowercase ) -> str:
return F"""PARENT {' '.join(__lowercase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_snake_case : int = 0
for i, l in enumerate(__lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowercase ):
_snake_case : Tuple = parents_per_chain[chain_counter]
else:
_snake_case : str = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowercase ) )
return "\n".join(__lowercase )
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Optional[Any] = residue_constants.restypes + ["X"]
def res_atoa(__lowercase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
_snake_case : Optional[int] = residue_constants.atom_types
_snake_case : List[str] = []
_snake_case : Tuple = prot.atom_mask
_snake_case : List[str] = prot.aatype
_snake_case : int = prot.atom_positions
_snake_case : int = prot.residue_index.astype(np.intaa )
_snake_case : List[Any] = prot.b_factors
_snake_case : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
_snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
if len(__lowercase ) > 0:
pdb_lines.extend(__lowercase )
_snake_case : Optional[Any] = aatype.shape[0]
_snake_case : str = 1
_snake_case : Tuple = 0
_snake_case : int = string.ascii_uppercase
_snake_case : Optional[Any] = None
# Add all atom sites.
for i in range(__lowercase ):
_snake_case : Dict = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_snake_case : List[Any] = "ATOM"
_snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
_snake_case : str = ""
_snake_case : str = ""
_snake_case : Any = 1.00
_snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
_snake_case : Dict = ""
_snake_case : Any = "A"
if chain_index is not None:
_snake_case : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_snake_case : Optional[int] = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowercase )
atom_index += 1
_snake_case : Dict = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_snake_case : Optional[int] = True
_snake_case : Union[str, Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_snake_case : List[str] = "TER"
_snake_case : str = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowercase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowercase )
def snake_case (__lowercase ) -> np.ndarray:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Protein:
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowercase , remark=__lowercase , parents=__lowercase , parents_chain_index=__lowercase , ) | 670 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
# General docstring
__SCREAMING_SNAKE_CASE : List[Any] = 'RegNetConfig'
# Base docstring
__SCREAMING_SNAKE_CASE : Tuple = 'facebook/regnet-y-040'
__SCREAMING_SNAKE_CASE : Tuple = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__SCREAMING_SNAKE_CASE : Optional[int] = 'facebook/regnet-y-040'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'tabby, tabby cat'
__SCREAMING_SNAKE_CASE : Tuple = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ = 3 , lowercase_ = 1 , lowercase_ = 1 , lowercase_ = "relu" , **lowercase_ , ):
super().__init__(**lowercase_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_snake_case : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_snake_case : int = tf.keras.layers.ConvaD(
filters=lowercase_ , kernel_size=lowercase_ , strides=lowercase_ , padding="VALID" , groups=lowercase_ , use_bias=lowercase_ , name="convolution" , )
_snake_case : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
_snake_case : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase ( self , lowercase_ ):
_snake_case : int = self.convolution(self.padding(lowercase_ ) )
_snake_case : Any = self.normalization(lowercase_ )
_snake_case : Dict = self.activation(lowercase_ )
return hidden_state
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : List[str] = config.num_channels
_snake_case : List[str] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Optional[int] = shape_list(lowercase_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_snake_case : List[str] = tf.transpose(lowercase_ , perm=(0, 2, 3, 1) )
_snake_case : List[str] = self.embedder(lowercase_ )
return hidden_state
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ = 2 , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Tuple = tf.keras.layers.ConvaD(
filters=lowercase_ , kernel_size=1 , strides=lowercase_ , use_bias=lowercase_ , name="convolution" )
_snake_case : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def UpperCamelCase ( self , lowercase_ , lowercase_ = False ):
return self.normalization(self.convolution(lowercase_ ) , training=lowercase_ )
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase_ , name="pooler" )
_snake_case : Tuple = [
tf.keras.layers.ConvaD(filters=lowercase_ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowercase_ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def UpperCamelCase ( self , lowercase_ ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_snake_case : Optional[Any] = self.pooler(lowercase_ )
for layer_module in self.attention:
_snake_case : Union[str, Any] = layer_module(lowercase_ )
_snake_case : int = hidden_state * pooled
return hidden_state
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Any = in_channels != out_channels or stride != 1
_snake_case : Tuple = max(1 , out_channels // config.groups_width )
_snake_case : List[Any] = (
TFRegNetShortCut(lowercase_ , stride=lowercase_ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_snake_case : Any = [
TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=lowercase_ , name="layer.2" ),
]
_snake_case : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Optional[int] = hidden_state
for layer_module in self.layers:
_snake_case : List[Any] = layer_module(lowercase_ )
_snake_case : Optional[int] = self.shortcut(lowercase_ )
hidden_state += residual
_snake_case : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Any = in_channels != out_channels or stride != 1
_snake_case : Union[str, Any] = max(1 , out_channels // config.groups_width )
_snake_case : Dict = (
TFRegNetShortCut(lowercase_ , stride=lowercase_ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_snake_case : int = [
TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=lowercase_ , name="layer.3" ),
]
_snake_case : Optional[int] = ACTaFN[config.hidden_act]
def UpperCamelCase ( self , lowercase_ ):
_snake_case : List[str] = hidden_state
for layer_module in self.layers:
_snake_case : Tuple = layer_module(lowercase_ )
_snake_case : Tuple = self.shortcut(lowercase_ )
hidden_state += residual
_snake_case : List[Any] = self.activation(lowercase_ )
return hidden_state
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 2 , lowercase_ = 2 , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_snake_case : Union[str, Any] = [
# downsampling is done in the first layer with stride of 2
layer(lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , name="layers.0" ),
*[layer(lowercase_ , lowercase_ , lowercase_ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase ( self , lowercase_ ):
for layer_module in self.layers:
_snake_case : Any = layer_module(lowercase_ )
return hidden_state
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_snake_case : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ , name=f"""stages.{i+1}""" ) )
def UpperCamelCase ( self , lowercase_ , lowercase_ = False , lowercase_ = True ):
_snake_case : List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case : Union[str, Any] = hidden_states + (hidden_state,)
_snake_case : Union[str, Any] = stage_module(lowercase_ )
if output_hidden_states:
_snake_case : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
@keras_serializable
class lowercase_ ( tf.keras.layers.Layer ):
_lowerCamelCase = RegNetConfig
def __init__( self , lowercase_ , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Dict = config
_snake_case : Any = TFRegNetEmbeddings(lowercase_ , name="embedder" )
_snake_case : Optional[Any] = TFRegNetEncoder(lowercase_ , name="encoder" )
_snake_case : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase_ , name="pooler" )
@unpack_inputs
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ):
_snake_case : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : Union[str, Any] = self.embedder(lowercase_ , training=lowercase_ )
_snake_case : Union[str, Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , training=lowercase_ )
_snake_case : str = encoder_outputs[0]
_snake_case : str = self.pooler(lowercase_ )
# Change to NCHW output format to have uniformity in the modules
_snake_case : List[str] = tf.transpose(lowercase_ , perm=(0, 3, 1, 2) )
_snake_case : Any = tf.transpose(lowercase_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_snake_case : str = tuple([tf.transpose(lowercase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase_ ( __snake_case ):
_lowerCamelCase = RegNetConfig
_lowerCamelCase = 'regnet'
_lowerCamelCase = 'pixel_values'
@property
def UpperCamelCase ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__SCREAMING_SNAKE_CASE : Union[str, Any] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__SCREAMING_SNAKE_CASE : List[str] = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __snake_case , )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , *lowercase_ , **lowercase_ ):
super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
_snake_case : Union[str, Any] = TFRegNetMainLayer(lowercase_ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_=False , ):
_snake_case : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : Union[str, Any] = self.regnet(
pixel_values=lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , training=lowercase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __snake_case , )
class lowercase_ ( __snake_case , __snake_case ):
def __init__( self , lowercase_ , *lowercase_ , **lowercase_ ):
super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
_snake_case : Optional[int] = config.num_labels
_snake_case : List[str] = TFRegNetMainLayer(lowercase_ , name="regnet" )
# classification head
_snake_case : List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_=False , ):
_snake_case : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : List[str] = self.regnet(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , training=lowercase_ )
_snake_case : Dict = outputs.pooler_output if return_dict else outputs[1]
_snake_case : Optional[int] = self.classifier[0](lowercase_ )
_snake_case : str = self.classifier[1](lowercase_ )
_snake_case : Optional[Any] = None if labels is None else self.hf_compute_loss(labels=lowercase_ , logits=lowercase_ )
if not return_dict:
_snake_case : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states ) | 670 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
_snake_case : Optional[Any] = self.image_processor
_snake_case : Tuple = -10
_snake_case : str = self.image_processor.size["longest_edge"]
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# pop arguments that are not used in the forward but used nevertheless
_snake_case : Any = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
_snake_case : int = original_sizes.numpy()
_snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_snake_case : Dict = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : int = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
_snake_case : Any = np.array(lowercase_ )
if input_labels is not None:
_snake_case : Optional[Any] = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
_snake_case : Tuple = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : List[str] = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Tuple = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Dict = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ ) | 670 | 1 |
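The `_normalize_coordinates` method above rescales prompts from the original image frame into the resized frame whose longest edge matches the processor's target size. A self-contained hedged sketch of that rescaling (the `+ 0.5` rounding is an assumption about what `_get_preprocess_shape` does):

```python
import numpy as np

def normalize_coords(coords: np.ndarray, original_size, target_longest_edge=1024):
    # Scale so the longest image edge becomes target_longest_edge,
    # then rescale (x, y) point coordinates accordingly.
    old_h, old_w = original_size
    scale = target_longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = coords.astype(np.float64).copy()
    out[..., 0] *= new_w / old_w
    out[..., 1] *= new_h / old_h
    return out

pts = np.array([[100.0, 200.0]])
print(normalize_coords(pts, (512, 512)))  # [[200. 400.]] for a 512 -> 1024 upscale
```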
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : int = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_snake_case : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Dict = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : str = self.get_rust_tokenizer()
_snake_case : Any = self.get_image_processor()
_snake_case : Optional[int] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Dict = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Any = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Any = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : int = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Dict = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : int = self.prepare_image_inputs()
_snake_case : Any = image_processor(lowercase_ , return_tensors="np" )
_snake_case : Tuple = processor(images=lowercase_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : str = self.get_tokenizer()
_snake_case : Tuple = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : str = processor(text=lowercase_ )
_snake_case : int = tokenizer(lowercase_ , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : List[Any] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Dict = "lower newer"
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : Optional[Any] = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : Dict = self.get_tokenizer()
_snake_case : str = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : List[str] = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : Union[str, Any] = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Dict = "lower newer"
_snake_case : List[Any] = self.prepare_image_inputs()
_snake_case : int = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 670 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_snake_case : Union[str, Any] = grid[0]
for row_n in range(1 , len(__lowercase ) ):
_snake_case : Union[str, Any] = grid[row_n]
_snake_case : List[Any] = fill_row(__lowercase , __lowercase )
_snake_case : List[Any] = grid[row_n]
return grid[-1][-1]
def snake_case (__lowercase , __lowercase ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
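The grid row above implements the classic minimum-path-sum dynamic program. A compact self-contained restatement with a worked example (names are de-obfuscated assumptions):

```python
def min_path_sum(grid):
    # Each cell accumulates the cheaper of the path from the left or from above.
    for j in range(1, len(grid[0])):
        grid[0][j] += grid[0][j - 1]
    for i in range(1, len(grid)):
        grid[i][0] += grid[i - 1][0]
        for j in range(1, len(grid[i])):
            grid[i][j] += min(grid[i][j - 1], grid[i - 1][j])
    return grid[-1][-1]

print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7, via 1 -> 3 -> 1 -> 1 -> 1
```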
import string
import numpy
def snake_case (__lowercase , __lowercase ) -> int:
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , __lowercase )
class lowercase_ :
_lowerCamelCase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
_lowerCamelCase = numpy.vectorize(lambda x : x % 36 )
_lowerCamelCase = numpy.vectorize(round )
def __init__( self , lowercase_ ):
_snake_case : Union[str, Any] = self.modulus(lowercase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_snake_case : Any = encrypt_key.shape[0]
def UpperCamelCase ( self , lowercase_ ):
return self.key_string.index(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
return self.key_string[round(lowercase_ )]
def UpperCamelCase ( self ):
_snake_case : int = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_snake_case : Tuple = det % len(self.key_string )
_snake_case : Tuple = len(self.key_string )
if greatest_common_divisor(lowercase_ , len(self.key_string ) ) != 1:
_snake_case : Optional[int] = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Union[str, Any] = [char for char in text.upper() if char in self.key_string]
_snake_case : Dict = chars[-1]
while len(lowercase_ ) % self.break_key != 0:
chars.append(lowercase_ )
return "".join(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : str = self.process_text(text.upper() )
_snake_case : Tuple = ""
for i in range(0 , len(lowercase_ ) - self.break_key + 1 , self.break_key ):
_snake_case : Optional[int] = text[i : i + self.break_key]
_snake_case : Union[str, Any] = [self.replace_letters(lowercase_ ) for char in batch]
_snake_case : int = numpy.array([vec] ).T
_snake_case : List[Any] = self.modulus(self.encrypt_key.dot(lowercase_ ) ).T.tolist()[
0
]
_snake_case : str = "".join(
self.replace_digits(lowercase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def UpperCamelCase ( self ):
_snake_case : Any = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_snake_case : int = det % len(self.key_string )
_snake_case : List[str] = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_snake_case : Optional[Any] = i
break
_snake_case : int = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(lowercase_ ) )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Union[str, Any] = self.make_decrypt_key()
_snake_case : Union[str, Any] = self.process_text(text.upper() )
_snake_case : str = ""
for i in range(0 , len(lowercase_ ) - self.break_key + 1 , self.break_key ):
_snake_case : int = text[i : i + self.break_key]
_snake_case : str = [self.replace_letters(lowercase_ ) for char in batch]
_snake_case : Dict = numpy.array([vec] ).T
_snake_case : int = self.modulus(decrypt_key.dot(lowercase_ ) ).T.tolist()[0]
_snake_case : Dict = "".join(
self.replace_digits(lowercase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def snake_case () -> None:
'''simple docstring'''
_snake_case : Any = int(input("Enter the order of the encryption key: " ) )
_snake_case : Optional[Any] = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(__lowercase ):
_snake_case : Tuple = [int(__lowercase ) for x in input().split()]
hill_matrix.append(__lowercase )
_snake_case : Tuple = HillCipher(numpy.array(__lowercase ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
_snake_case : Tuple = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
_snake_case : int = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(__lowercase ) )
elif option == "2":
_snake_case : Optional[Any] = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(__lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 670 | import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(__lowercase ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | 1 |
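The recursive calls in the row above use the name `quick_select`, so that is presumably the de-obfuscated name of the second function. A self-contained sketch of the same expected-linear-time selection, with a median lookup as the usage example:

```python
import random

def quick_select(items, index):
    # Expected O(n): partition around a random pivot, recurse into one side.
    if index < 0 or index >= len(items):
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    less = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    greater = [x for x in items if x > pivot]
    if len(less) <= index < len(less) + len(equal):
        return pivot                                  # index lands on the pivot run
    if index < len(less):
        return quick_select(less, index)              # target is in the smaller side
    return quick_select(greater, index - len(less) - len(equal))

print(quick_select([7, 1, 5, 3, 9], 2))  # 5, the median
```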
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'deit'
def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=224 , lowercase_=16 , lowercase_=3 , lowercase_=True , lowercase_=16 , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : List[str] = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[Any] = initializer_range
_snake_case : Tuple = layer_norm_eps
_snake_case : Union[str, Any] = image_size
_snake_case : Optional[int] = patch_size
_snake_case : str = num_channels
_snake_case : Any = qkv_bias
_snake_case : Dict = encoder_stride
class lowercase_ ( __snake_case ):
_lowerCamelCase = version.parse('1.11' )
@property
def UpperCamelCase ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase ( self ):
return 1e-4 | 670 | from math import pow, sqrt
def snake_case (*__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
) | 670 | 1 |
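# A hedged usage sketch of the Graham's law helpers above: the effusion-rate
# ratio of two gases is sqrt(M2 / M1). The function name and the sample molar
# masses below are illustrative, not part of the original file.
from math import sqrt

def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float:
    # Rate of gas 1 relative to gas 2, per Graham's law of effusion.
    if molar_mass_1 <= 0 or molar_mass_2 <= 0:
        raise ValueError("Input Error: Molar mass values must be greater than 0.")
    return round(sqrt(molar_mass_2 / molar_mass_1), 6)

# Hydrogen (~2.016 g/mol) effuses roughly 4x faster than oxygen (~32.00 g/mol).
print(effusion_ratio(2.016, 32.00))  # ~3.98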
__SCREAMING_SNAKE_CASE : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def snake_case (__lowercase ) -> int:
'''simple docstring'''
_snake_case : List[str] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
__SCREAMING_SNAKE_CASE : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : List[str] = False
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_snake_case : Optional[Any] = chain(next_number(__lowercase ) )
_snake_case : int = number_chain
while number < 10_000_000:
_snake_case : Any = number_chain
number *= 10
return number_chain
def snake_case (__lowercase = 10_000_000 ) -> int:
'''simple docstring'''
for i in range(1 , __lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''') | 670 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ ) | 670 | 1 |
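# A hedged restatement of the digit-square chain idea from the Project Euler
# #92 snippet above: every chain eventually reaches 1 or 89, so memoizing the
# endpoint makes counting fast. Names and the small demo limit are
# illustrative; the original uses a preallocated array instead of lru_cache.
from functools import lru_cache

def next_number_demo(number: int) -> int:
    # Sum of the squares of the decimal digits of `number`.
    return sum(int(digit) ** 2 for digit in str(number))

@lru_cache(maxsize=None)
def ends_in_89(number: int) -> bool:
    if number == 1:
        return False
    if number == 89:
        return True
    return ends_in_89(next_number_demo(number))

def count_chains_ending_in_89(limit: int) -> int:
    return sum(1 for i in range(1, limit) if ends_in_89(i))

print(count_chains_ending_in_89(10_000))  # chains below a small demo limit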
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__SCREAMING_SNAKE_CASE : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__SCREAMING_SNAKE_CASE : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def snake_case (__lowercase , __lowercase ) -> VectorOut:
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(__lowercase ) - np.asarray(__lowercase )) ** 2 ) )
def snake_case (__lowercase , __lowercase ) -> VectorOut:
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(__lowercase , __lowercase ) ) ** (1 / 2)
if __name__ == "__main__":
def snake_case () -> None:
'''simple docstring'''
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
benchmark() | 670 | from __future__ import annotations
from typing import TypedDict
class lowercase_ ( __snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def snake_case (__lowercase ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case : List[str] = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case : Union[str, Any] = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | 1 |
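# A hedged, self-contained round trip of the Burrows-Wheeler transform logic
# above, with the obfuscated names replaced by the ones the snippet's
# __main__ block uses. This is a minimal re-implementation, not the original.
def all_rotations_demo(s: str) -> list[str]:
    return [s[i:] + s[:i] for i in range(len(s))]

def bwt_transform_demo(s: str) -> dict:
    rotations = sorted(all_rotations_demo(s))
    return {
        "bwt_string": "".join(word[-1] for word in rotations),
        "idx_original_string": rotations.index(s),
    }

def reverse_bwt_demo(bwt_string: str, idx_original_string: int) -> str:
    # Repeatedly prepend the BWT column and re-sort to rebuild all rotations.
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        ordered_rotations = sorted(
            bwt_string[i] + ordered_rotations[i] for i in range(len(bwt_string))
        )
    return ordered_rotations[idx_original_string]

result = bwt_transform_demo("banana")
print(result["bwt_string"])  # nnbaaa
print(reverse_bwt_demo(result["bwt_string"], result["idx_original_string"]))  # banana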
def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_snake_case : List[str] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowercase )
if number < 1:
_snake_case : Optional[Any] = F"""Input value of [number={number}] must be > 0"""
raise ValueError(__lowercase )
_snake_case : List[Any] = 1
for i in range(1 , __lowercase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 1 |
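# A hedged sketch of the recurrence used by the Catalan-number snippet above:
# C(0) = 1 and C(i) = C(i-1) * (4i - 2) // (i + 1), which is always exact in
# integer arithmetic. The function name is illustrative.
def catalan_numbers(count: int) -> list[int]:
    numbers = [1]
    for i in range(1, count):
        numbers.append(numbers[-1] * (4 * i - 2) // (i + 1))
    return numbers

print(catalan_numbers(6))  # [1, 1, 2, 5, 14, 42]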
from __future__ import annotations
from typing import Any
class lowercase_ ( __snake_case ):
pass
class lowercase_ :
def __init__( self , lowercase_ ):
_snake_case : Any = data
_snake_case : Node | None = None
def __iter__( self ):
_snake_case : List[str] = self
_snake_case : str = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowercase_ )
yield node.data
_snake_case : Optional[Any] = node.next_node
@property
def UpperCamelCase ( self ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = Node(1)
__SCREAMING_SNAKE_CASE : List[Any] = Node(2)
__SCREAMING_SNAKE_CASE : int = Node(3)
__SCREAMING_SNAKE_CASE : List[str] = Node(4)
print(root_node.has_loop) # False
__SCREAMING_SNAKE_CASE : Union[str, Any] = root_node.next_node
print(root_node.has_loop) # True
__SCREAMING_SNAKE_CASE : List[str] = Node(5)
__SCREAMING_SNAKE_CASE : Any = Node(6)
__SCREAMING_SNAKE_CASE : Dict = Node(5)
__SCREAMING_SNAKE_CASE : List[str] = Node(6)
print(root_node.has_loop) # False
__SCREAMING_SNAKE_CASE : int = Node(1)
print(root_node.has_loop) # False | 670 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
_snake_case : Optional[int] = parent
_snake_case : str = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Optional[Any] = use_labels
_snake_case : Tuple = vocab_size
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Union[str, Any] = eos_token_id
_snake_case : str = pad_token_id
_snake_case : Any = bos_token_id
_snake_case : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : List[Any] = self.attention_window + 2
# because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase ( self ):
_snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
_snake_case : int = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
_snake_case : List[Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
_snake_case : Optional[Any] = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : int = inputs_dict["attention_mask"][:1, :]
_snake_case : int = 1
# first forward pass
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFLEDModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# TODO: Head-masking not yet implemented
pass
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
return tf.constant(__lowercase , dtype=tf.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | 1 |
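# A hedged, standalone sketch of the padding-mask construction that
# `prepare_led_inputs_dict` above performs: positions equal to the pad token
# get 0, everything else 1. The pad id and the toy batch are illustrative.
import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[0, 31_414, 232, 2, 1, 1]])  # one right-padded row
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 1 0 0]]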
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : List[str] = 0
for ch in input_str:
_snake_case : int = ord(__lowercase )
_snake_case : Tuple = pow(2 , __lowercase )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 1 |
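# A hedged restatement of the bitmap trick from the character-uniqueness
# check earlier in this row: each character maps to one bit of an integer, so
# "seen before?" becomes a single AND. The function name is illustrative.
def all_chars_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_bit = 1 << ord(ch)  # one bit per Unicode code point
        if bitmap & ch_bit:    # bit already set -> repeated character
            return False
        bitmap |= ch_bit
    return True

print(all_chars_unique("abcdef"))  # True
print(all_chars_unique("abcdea"))  # False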
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Optional[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Tuple = self.dummy_uncond_unet
_snake_case : Any = DDIMScheduler()
_snake_case : Dict = self.dummy_vq_model
_snake_case : Optional[int] = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
_snake_case : List[str] = torch.manual_seed(0 )
_snake_case : Union[str, Any] = ldm(generator=lowercase_ , num_inference_steps=2 , output_type="numpy" ).images
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : List[Any] = ldm(generator=lowercase_ , num_inference_steps=2 , output_type="numpy" , return_dict=lowercase_ )[0]
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Dict = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
_snake_case : int = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : int = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
_snake_case : List[Any] = torch.manual_seed(0 )
_snake_case : List[str] = ldm(generator=lowercase_ , num_inference_steps=5 , output_type="numpy" ).images
_snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_snake_case : Dict = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
_snake_case : Any = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 670 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | 1 |
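# A hedged sketch of the processor pattern the CLIPSeg tests above exercise:
# a thin wrapper that routes text to a tokenizer, images to an image
# processor, and merges the results. Class and argument names are
# illustrative, not the library's API.
class SimpleProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding

proc = SimpleProcessor(
    tokenizer=lambda text, **_: {"input_ids": [[1, 2, 3]]},
    image_processor=lambda images, **_: {"pixel_values": images},
)
print(proc(text="lower newer", images=[[0.0]]))  # both keys present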
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : List[str] = logging.get_logger()
# the current default level is logging.WARNING
_snake_case : Tuple = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = logging.get_verbosity()
_snake_case : List[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
_snake_case : Any = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowercase_ ) as cl:
logger.warning(lowercase_ )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowercase_ ) as cl:
logger.warning(lowercase_ )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowercase_ ) as cl:
logger.warning(lowercase_ )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(lowercase_ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def UpperCamelCase ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_snake_case : Any = logging.get_logger("transformers.models.bart.tokenization_bart" )
_snake_case : Optional[Any] = os.getenv("TRANSFORMERS_VERBOSITY" , lowercase_ )
_snake_case : Dict = logging.log_levels[env_level_str]
_snake_case : Union[str, Any] = logging.get_verbosity()
self.assertEqual(
lowercase_ , lowercase_ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_snake_case : List[Any] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def UpperCamelCase ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_snake_case : Dict = logging.logging.getLogger()
with CaptureLogger(lowercase_ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def UpperCamelCase ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_snake_case : List[str] = logging.get_logger("transformers.models.bart.tokenization_bart" )
_snake_case : List[Any] = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowercase_ ) as cl:
logger.warning_advice(lowercase_ )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowercase_ ) as cl:
logger.warning_advice(lowercase_ )
self.assertEqual(cl.out , msg + "\n" )
def snake_case () -> Dict:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled() | 670 | from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_snake_case : Optional[int] = PipelineDataFormat.from_str(
format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__lowercase , __lowercase )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : str = nlp
_snake_case : str = reader
@staticmethod
def UpperCamelCase ( lowercase_ ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = self._nlp, []
for entry in self._reader:
_snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : str = self._reader.save_binary(lowercase_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowercase_ ) | 670 | 1 |
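# A hedged, minimal sketch of the subcommand-registration pattern used by
# `RunCommand.register_subcommand` above: each subcommand binds a handler via
# `set_defaults(func=...)` and the caller dispatches on `args.func`. The
# handler body and arguments are illustrative.
from argparse import ArgumentParser

def run_factory(args):
    print(f"would run task={args.task!r} on input={args.input!r}")

parser = ArgumentParser("cli")
subparsers = parser.add_subparsers()
run_parser = subparsers.add_parser("run", help="Run a pipeline through the CLI")
run_parser.add_argument("--task", required=True)
run_parser.add_argument("--input", default=None)
run_parser.set_defaults(func=run_factory)

args = parser.parse_args(["run", "--task", "text-classification"])
args.func(args)  # dispatch to the registered handler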
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> Dict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = list(__lowercase )
for i in range(len(__lowercase ) ):
_snake_case : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__lowercase , __lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case (__lowercase = None , __lowercase = 128 ) -> Any:
'''simple docstring'''
if function is None:
return functools.partial(__lowercase , starting_batch_size=__lowercase )
_snake_case : List[str] = starting_batch_size
def decorator(*__lowercase , **__lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_snake_case : Optional[Any] = list(inspect.signature(__lowercase ).parameters.keys() )
# Guard against user error
if len(__lowercase ) < (len(__lowercase ) + 1):
_snake_case : str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__lowercase , *__lowercase , **__lowercase )
except Exception as e:
if should_reduce_batch_size(__lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 670 | import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : List[str] = nn.ModuleList(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
_snake_case ,_snake_case : Optional[int] = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
_snake_case ,_snake_case : Tuple = down_samples, mid_sample
else:
_snake_case : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
_snake_case : Tuple = 0
_snake_case : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
_snake_case : int = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
_snake_case : List[str] = 0
_snake_case : Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case : Optional[Any] = pretrained_model_path
while os.path.isdir(lowercase_ ):
_snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
_snake_case : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(lowercase_ ) | 670 | 1 |
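# A hedged usage sketch of the `find_executable_batch_size` decorator defined
# two rows above: the wrapped function must take `batch_size` as its first
# argument, is called *without* it, and the decorator halves the size on
# OOM-style failures. The import path is assumed to match accelerate's public
# export; the training body is illustrative.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # ... build dataloaders and run one training attempt at `batch_size` ...
    print(f"training with batch_size={batch_size}")

train()  # no batch_size passed; the decorator injects and adapts it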
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def snake_case (__lowercase , __lowercase , __lowercase = "x" , __lowercase = 10**-10 , __lowercase = 1 , ) -> complex:
'''simple docstring'''
_snake_case : List[str] = symbols(__lowercase )
_snake_case : Union[str, Any] = lambdify(__lowercase , __lowercase )
_snake_case : Any = lambdify(__lowercase , diff(__lowercase , __lowercase ) )
_snake_case : Tuple = starting_point
while True:
if diff_function(__lowercase ) != 0:
_snake_case : int = prev_guess - multiplicity * func(__lowercase ) / diff_function(
__lowercase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_snake_case : str = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
F'''{newton_raphson('exp(x) - 1', 1_0, precision=0.0_05)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''') | 670 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'CLIPImageProcessor'
_lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : Dict = kwargs.pop("feature_extractor" )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
_snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | 1 |
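# A hedged, dependency-free restatement of the Newton-Raphson iteration from
# the sympy-based snippet two rows above: x_{n+1} = x_n - f(x_n) / f'(x_n),
# stopping when consecutive guesses agree within `precision`. Names and the
# sample root are illustrative.
def newton_raphson_demo(func, deriv, start: float, precision: float = 1e-10) -> float:
    guess = start
    while True:
        slope = deriv(guess)
        if slope == 0:
            raise ZeroDivisionError("Could not find root")
        next_guess = guess - func(guess) / slope
        if abs(next_guess - guess) < precision:
            return next_guess
        guess = next_guess

# Fourth root of 5, i.e. the positive root of x**4 - 5 = 0.
print(newton_raphson_demo(lambda x: x**4 - 5, lambda x: 4 * x**3, 1.5))  # ~1.4953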
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def snake_case (__lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_snake_case : Dict = b.T
_snake_case : Optional[Any] = np.sum(np.square(__lowercase ) , axis=1 )
_snake_case : Tuple = np.sum(np.square(__lowercase ) , axis=0 )
_snake_case : List[Any] = np.matmul(__lowercase , __lowercase )
_snake_case : Tuple = aa[:, None] - 2 * ab + ba[None, :]
return d
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
_snake_case : List[Any] = x.reshape(-1 , 3 )
_snake_case : Optional[int] = squared_euclidean_distance(__lowercase , __lowercase )
return np.argmin(__lowercase , axis=1 )
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['pixel_values']
def __init__( self , lowercase_ = None , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = True , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : Any = size if size is not None else {"height": 256, "width": 256}
_snake_case : Dict = get_size_dict(lowercase_ )
_snake_case : List[str] = np.array(lowercase_ ) if clusters is not None else None
_snake_case : Union[str, Any] = do_resize
_snake_case : Optional[int] = size
_snake_case : List[str] = resample
_snake_case : Tuple = do_normalize
_snake_case : Dict = do_color_quantize
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
lowercase_ , size=(size["height"], size["width"]) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , ):
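# Normalize pixel values from [0, 255] to [-1, 1]: scale by 1/127.5, then subtract 1.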
_snake_case : Optional[int] = rescale(image=lowercase_ , scale=1 / 127.5 , data_format=lowercase_ )
_snake_case : List[str] = image - 1
return image
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
_snake_case : Any = do_resize if do_resize is not None else self.do_resize
_snake_case : Any = size if size is not None else self.size
_snake_case : List[str] = get_size_dict(lowercase_ )
_snake_case : int = resample if resample is not None else self.resample
_snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_snake_case : Union[str, Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_snake_case : Any = clusters if clusters is not None else self.clusters
_snake_case : str = np.array(lowercase_ )
_snake_case : Any = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
_snake_case : Tuple = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
_snake_case : Optional[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_normalize:
_snake_case : Union[str, Any] = [self.normalize(image=lowercase_ ) for image in images]
if do_color_quantize:
_snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_snake_case : List[Any] = np.array(lowercase_ )
_snake_case : Any = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_snake_case : str = images.shape[0]
_snake_case : Optional[int] = images.reshape(lowercase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_snake_case : Any = list(lowercase_ )
else:
_snake_case : Any = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
_snake_case : Optional[int] = {"input_ids": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) | 670 | from __future__ import annotations
def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
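# Apply Ohm's law (V = I * R): exactly one argument must be 0, and that missing quantity is computed from the other two.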
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[int] = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 | import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> Dict:
'''simple docstring'''
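# Release the passed objects (rebinding each slot to None), force a GC pass, and empty the accelerator cache; callers should rebind to the returned list.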
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = list(__lowercase )
for i in range(len(__lowercase ) ):
_snake_case : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__lowercase , __lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case (__lowercase = None , __lowercase = 128 ) -> Any:
'''simple docstring'''
if function is None:
return functools.partial(__lowercase , starting_batch_size=__lowercase )
_snake_case : List[str] = starting_batch_size
def decorator(*__lowercase , **__lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_snake_case : Optional[Any] = list(inspect.signature(__lowercase ).parameters.keys() )
# Guard against user error
if len(__lowercase ) < (len(__lowercase ) + 1):
_snake_case : str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__lowercase , *__lowercase , **__lowercase )
except Exception as e:
if should_reduce_batch_size(__lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 670 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__SCREAMING_SNAKE_CASE : int = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__SCREAMING_SNAKE_CASE : str = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__SCREAMING_SNAKE_CASE : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def snake_case (__lowercase , __lowercase ) -> tuple[str, float]:
'''simple docstring'''
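# Fitness is the number of positions where the candidate matches the target string.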
_snake_case : List[str] = len([g for position, g in enumerate(__lowercase ) if g == main_target[position]] )
return (item, float(__lowercase ))
def snake_case (__lowercase , __lowercase ) -> tuple[str, str]:
'''simple docstring'''
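# Single-point crossover: the parents swap tails at a random slice index.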
_snake_case : Optional[int] = random.randint(0 , len(__lowercase ) - 1 )
_snake_case : Union[str, Any] = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : List[Any] = list(__lowercase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case : Union[str, Any] = random.choice(__lowercase )
return "".join(__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , ) -> list[str]:
'''simple docstring'''
_snake_case : Optional[int] = []
# Generate more children proportionally to the fitness score.
_snake_case : Union[str, Any] = int(parent_a[1] * 100 ) + 1
_snake_case : Optional[int] = 10 if child_n >= 10 else child_n
for _ in range(__lowercase ):
_snake_case : List[Any] = population_score[random.randint(0 , __lowercase )][0]
_snake_case ,_snake_case : List[str] = crossover(parent_a[0] , __lowercase )
# Append new string to the population list.
pop.append(mutate(__lowercase , __lowercase ) )
pop.append(mutate(__lowercase , __lowercase ) )
return pop
def snake_case (__lowercase , __lowercase , __lowercase = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
_snake_case : List[Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(__lowercase )
# Verify that the target contains no genes besides the ones in the genes list.
_snake_case : str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case : List[str] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(__lowercase )
# Generate random starting population.
_snake_case : List[str] = []
for _ in range(__lowercase ):
population.append("".join([random.choice(__lowercase ) for i in range(len(__lowercase ) )] ) )
# Just some logs to know what the algorithm is doing.
_snake_case ,_snake_case : List[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case : List[Any] = [evaluate(__lowercase , __lowercase ) for item in population]
# Check if there is a matching evolution.
_snake_case : Union[str, Any] = sorted(__lowercase , key=lambda __lowercase : x[1] , reverse=__lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
_snake_case : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__lowercase )
# Normalize population score to be between 0 and 1.
_snake_case : List[str] = [
(item, score / len(__lowercase )) for item, score in population_score
]
# This is selection
for i in range(__lowercase ):
population.extend(select(population_score[int(__lowercase )] , __lowercase , __lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(__lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
__SCREAMING_SNAKE_CASE : int = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE : List[Any] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
) | 670 | __SCREAMING_SNAKE_CASE : Union[str, Any] = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
__SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()}
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Any = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def snake_case (__lowercase ) -> str:
'''simple docstring'''
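# Each letter maps to a fixed five-character group of 'A'/'B'; decoding consumes every coded word five characters at a time.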
if set(__lowercase ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
_snake_case : str = ""
for word in coded.split():
while len(__lowercase ) != 0:
decoded += decode_dict[word[:5]]
_snake_case : int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod() | 670 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = inspect.getfile(accelerate.test_utils )
_snake_case : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_snake_case : Any = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def UpperCamelCase ( self ):
_snake_case : Any = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
_snake_case : Union[str, Any] = [sys.executable] + distributed_args
execute_subprocess_async(lowercase_ , env=os.environ.copy() ) | 670 | import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
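# replicate() broadcasts across the local JAX devices and shard() splits the leading batch dimension, so each device runs its own slice of the batch in parallel.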
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def snake_case (__lowercase = 8 ) -> str:
'''simple docstring'''
_snake_case : Dict = ascii_letters + digits + punctuation
return "".join(secrets.choice(__lowercase ) for _ in range(__lowercase ) )
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
i -= len(__lowercase )
_snake_case : Optional[Any] = i // 3
_snake_case : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_snake_case : Union[str, Any] = (
chars_incl
+ random(__lowercase , quotient + remainder )
+ random(__lowercase , __lowercase )
+ random(__lowercase , __lowercase )
)
_snake_case : Optional[int] = list(__lowercase )
shuffle(__lowercase )
return "".join(__lowercase )
# random is a generalised function for letters, characters and numbers
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
return "".join(secrets.choice(__lowercase ) for _ in range(__lowercase ) )
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
pass # Put your code here...
def snake_case (__lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
pass # Put your code here...
def snake_case (__lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
pass # Put your code here...
def snake_case (__lowercase , __lowercase = 8 ) -> bool:
'''simple docstring'''
if len(__lowercase ) < min_length:
# The password must be at least 8 characters long
return False
_snake_case : List[Any] = any(char in ascii_uppercase for char in password )
_snake_case : Any = any(char in ascii_lowercase for char in password )
_snake_case : Dict = any(char in digits for char in password )
_snake_case : Optional[int] = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def snake_case () -> Dict:
'''simple docstring'''
_snake_case : Any = int(input("Please indicate the max length of your password: " ).strip() )
_snake_case : Union[str, Any] = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(__lowercase ) )
print(
"Alternative Password generated:" , alternative_password_generator(__lowercase , __lowercase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main() | 670 | from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
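# Grow a filled copy over each checkpoint block, then move the copies into the CPU memory slots.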
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'CLIPImageProcessor'
_lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : Dict = kwargs.pop("feature_extractor" )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
_snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'linear'
_lowerCamelCase = 'cosine'
_lowerCamelCase = 'cosine_with_restarts'
_lowerCamelCase = 'polynomial'
_lowerCamelCase = 'constant'
_lowerCamelCase = 'constant_with_warmup'
_lowerCamelCase = 'piecewise_constant'
def snake_case (__lowercase , __lowercase = -1 ) -> List[Any]:
'''simple docstring'''
return LambdaLR(__lowercase , lambda __lowercase : 1 , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1.0 , __lowercase ) )
return 1.0
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
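# step_rules is a comma-separated string of "multiplier:step" pairs plus a trailing final multiplier; e.g. "1:10,0.1:20,0.01" (an assumed example) would keep the lr multiplier at 1 until step 10, 0.1 until step 20, then 0.01 afterwards.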
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = step_rules.split("," )
for rule_str in rule_list[:-1]:
_snake_case ,_snake_case : str = rule_str.split(":" )
_snake_case : Dict = int(__lowercase )
_snake_case : List[str] = float(__lowercase )
_snake_case : Tuple = value
_snake_case : str = float(rule_list[-1] )
def create_rules_function(__lowercase , __lowercase ):
def rule_func(__lowercase ) -> float:
_snake_case : List[str] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_snake_case : int = create_rules_function(__lowercase , __lowercase )
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=-1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 0.5 , __lowercase = -1 ) -> Dict:
'''simple docstring'''
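# Linear warmup, then cosine decay of the lr multiplier from 1 to 0; the default num_cycles=0.5 traces a single half-cosine.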
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowercase ) * 2.0 * progress )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 1 , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowercase ) * progress) % 1.0) )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=1e-7 , __lowercase=1.0 , __lowercase=-1 ) -> List[Any]:
'''simple docstring'''
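# Linear warmup, then polynomial decay from the optimizer's initial lr down to lr_end; the returned multiplier is relative to lr_init.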
_snake_case : List[Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_snake_case : Tuple = lr_init - lr_end
_snake_case : Any = num_training_steps - num_warmup_steps
_snake_case : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_snake_case : Optional[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowercase , __lowercase , __lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = 1 , __lowercase = 1.0 , __lowercase = -1 , ) -> List[Any]:
'''simple docstring'''
_snake_case : Any = SchedulerType(__lowercase )
_snake_case : Union[str, Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowercase , last_epoch=__lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowercase , step_rules=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowercase , num_warmup_steps=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , num_cycles=__lowercase , last_epoch=__lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , power=__lowercase , last_epoch=__lowercase , )
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , last_epoch=__lowercase ) | 670 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__SCREAMING_SNAKE_CASE : List[Any] = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__SCREAMING_SNAKE_CASE : Any = {
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def snake_case () -> str:
'''simple docstring'''
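# Reversible byte-to-unicode mapping: printable bytes map to themselves, the rest are shifted past 2**8, so BPE can operate on arbitrary byte strings.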
_snake_case : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
_snake_case : Any = bs[:]
_snake_case : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowercase )
cs.append(2**8 + n )
n += 1
_snake_case : int = [chr(__lowercase ) for n in cs]
return dict(zip(__lowercase , __lowercase ) )
def snake_case (__lowercase ) -> Union[str, Any]:
'''simple docstring'''
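# Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols.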
_snake_case : Dict = set()
_snake_case : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case : Tuple = char
return pairs
class lowercase_ ( __snake_case ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , lowercase_ , lowercase_ , lowercase_="replace" , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=False , **lowercase_ , ):
_snake_case : Tuple = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token
_snake_case : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token
_snake_case : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token
_snake_case : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token
_snake_case : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token
_snake_case : Union[str, Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
_snake_case : str = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
_snake_case : Tuple = json.load(lowercase_ )
_snake_case : Any = {v: k for k, v in self.encoder.items()}
_snake_case : List[Any] = errors # how to handle errors in decoding
_snake_case : Optional[int] = bytes_to_unicode()
_snake_case : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase_ , encoding="utf-8" ) as merges_handle:
_snake_case : List[str] = merges_handle.read().split("\n" )[1:-1]
_snake_case : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : List[str] = {}
_snake_case : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCamelCase ( self ):
return len(self.encoder )
def UpperCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self , lowercase_ ):
if token in self.cache:
return self.cache[token]
_snake_case : Optional[int] = tuple(lowercase_ )
_snake_case : Union[str, Any] = get_pairs(lowercase_ )
if not pairs:
return token
while True:
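# Repeatedly merge the adjacent symbol pair with the lowest merge rank until no learned merge applies.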
_snake_case : Tuple = min(lowercase_ , key=lambda lowercase_ : self.bpe_ranks.get(lowercase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case ,_snake_case : Any = bigram
_snake_case : int = []
_snake_case : Union[str, Any] = 0
while i < len(lowercase_ ):
try:
_snake_case : Optional[int] = word.index(lowercase_ , lowercase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case : str = j
if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case : str = tuple(lowercase_ )
_snake_case : List[str] = new_word
if len(lowercase_ ) == 1:
break
else:
_snake_case : Optional[Any] = get_pairs(lowercase_ )
_snake_case : Dict = " ".join(lowercase_ )
_snake_case : str = word
return word
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = []
for token in re.findall(self.pat , lowercase_ ):
_snake_case : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase_ ).split(" " ) )
return bpe_tokens
def UpperCamelCase ( self , lowercase_ ):
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase ( self , lowercase_ ):
return self.decoder.get(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Union[str, Any] = "".join(lowercase_ )
_snake_case : str = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case : List[Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Any = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
_snake_case : List[Any] = 0
with open(lowercase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
_snake_case : Optional[Any] = token_index
writer.write(" ".join(lowercase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case : Any = [self.cls_token_id]
_snake_case : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self , lowercase_ , lowercase_=False , **lowercase_ ):
_snake_case : str = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase_ ) > 0 and not text[0].isspace()):
_snake_case : List[Any] = " " + text
return (text, kwargs) | 670 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'roc_bert'
def __init__( self , lowercase_=30_522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_="absolute" , lowercase_=None , lowercase_=True , lowercase_=True , lowercase_=768 , lowercase_=910 , lowercase_=512 , lowercase_=24_858 , lowercase_=True , **lowercase_ , ):
_snake_case : int = vocab_size
_snake_case : Union[str, Any] = max_position_embeddings
_snake_case : Union[str, Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Union[str, Any] = initializer_range
_snake_case : List[Any] = type_vocab_size
_snake_case : int = layer_norm_eps
_snake_case : Optional[Any] = use_cache
_snake_case : List[Any] = enable_pronunciation
_snake_case : Dict = enable_shape
_snake_case : Dict = pronunciation_embed_dim
_snake_case : Tuple = pronunciation_vocab_size
_snake_case : Tuple = shape_embed_dim
_snake_case : List[str] = shape_vocab_size
_snake_case : Dict = concat_input
_snake_case : int = position_embedding_type
_snake_case : int = classifier_dropout
super().__init__(pad_token_id=lowercase_ , **lowercase_ ) | 670 | 1 |
def snake_case (__lowercase ) -> list:
'''simple docstring'''
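# Knuth-Morris-Pratt prefix function: for each index, the length of the longest proper prefix that is also a suffix of the substring ending there.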
_snake_case : Dict = [0] * len(__lowercase )
for i in range(1 , len(__lowercase ) ):
# use last results for better performance - dynamic programming
_snake_case : Tuple = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_snake_case : int = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_snake_case : Dict = j
return prefix_result
def snake_case (__lowercase ) -> int:
'''simple docstring'''
return max(prefix_function(__lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case (__lowercase ) -> Tuple:
'''simple docstring'''
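# Produce the photographic negative: each channel value v becomes 255 - v.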
_snake_case ,_snake_case : int = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowercase ):
for j in range(__lowercase ):
_snake_case : Optional[Any] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__SCREAMING_SNAKE_CASE : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
__SCREAMING_SNAKE_CASE : Tuple = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows() | 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
_lowerCamelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_lowerCamelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_lowerCamelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_lowerCamelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_lowerCamelCase = None
# Templates used to generate this protein (prediction-only)
_lowerCamelCase = None
# Chain corresponding to each parent
_lowerCamelCase = None
def snake_case (__lowercase ) -> Protein:
'''simple docstring'''
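# Parse a ProteinNet-style text record ([PRIMARY], [TERTIARY], [MASK] sections) into a Protein object.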
_snake_case : str = r"(\[[A-Z]+\]\n)"
_snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
_snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
_snake_case : List[str] = ["N", "CA", "C"]
_snake_case : Any = None
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowercase ) ):
if seq[i] not in residue_constants.restypes:
_snake_case : Tuple = "X" # FIXME: strings are immutable
_snake_case : int = np.array(
[residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
_snake_case : Dict = np.array(__lowercase )
_snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
_snake_case : Any = np.zeros(
(
len(__lowercase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : Dict = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
if parents is None or len(__lowercase ) == 0:
_snake_case : Optional[int] = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[int] = pdb_str.split("\n" )
_snake_case : List[str] = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_snake_case : str = []
if prot.parents_chain_index is not None:
_snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowercase ) , [] )
parent_dict[str(__lowercase )].append(__lowercase )
_snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
parents_per_chain.append(__lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_snake_case : List[str] = [["N/A"]]
def make_parent_line(__lowercase ) -> str:
return F"""PARENT {' '.join(__lowercase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_snake_case : int = 0
for i, l in enumerate(__lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowercase ):
_snake_case : Tuple = parents_per_chain[chain_counter]
else:
_snake_case : str = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowercase ) )
return "\n".join(__lowercase )
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Optional[Any] = residue_constants.restypes + ["X"]
def res_atoa(__lowercase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
_snake_case : Optional[int] = residue_constants.atom_types
_snake_case : List[str] = []
_snake_case : Tuple = prot.atom_mask
_snake_case : List[str] = prot.aatype
_snake_case : int = prot.atom_positions
_snake_case : int = prot.residue_index.astype(np.intaa )
_snake_case : List[Any] = prot.b_factors
_snake_case : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
_snake_case : Union[str, Any] = get_pdb_headers(prot )
if len(__lowercase ) > 0:
pdb_lines.extend(__lowercase )
_snake_case : Optional[Any] = aatype.shape[0]
_snake_case : str = 1
_snake_case : Tuple = 0
_snake_case : int = string.ascii_uppercase
_snake_case : Optional[Any] = None
# Add all atom sites.
for i in range(n ):
_snake_case : Dict = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_snake_case : List[Any] = "ATOM"
_snake_case : Union[str, Any] = atom_name if len(atom_name ) == 4 else F""" {atom_name}"""
_snake_case : str = ""
_snake_case : str = ""
_snake_case : Any = 1.00
_snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
_snake_case : Dict = ""
_snake_case : Any = "A"
if chain_index is not None:
_snake_case : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_snake_case : Optional[int] = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowercase )
atom_index += 1
_snake_case : Dict = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_snake_case : Optional[int] = True
_snake_case : Union[str, Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_snake_case : List[str] = "TER"
_snake_case : str = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowercase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowercase )
def snake_case (prot ) -> np.ndarray:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case (features , result , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowercase , remark=__lowercase , parents=__lowercase , parents_chain_index=__lowercase , ) | 670 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for a, b in zip(lowercase_ , lowercase_ ):
self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Tuple = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(lowercase_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
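# Sanity check for the assertions above: the three accumulated gradients
# sum element-wise to [1 - 2 - 1, 2 + 1 + 2] = [-2.0, 5.0], and reset()
# zeroes both the step counter and the stored gradients.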
def UpperCamelCase ( self ):
_snake_case : str = None
ops.enable_eager_execution_internal()
_snake_case : List[Any] = tf.config.list_physical_devices("CPU" )
if len(lowercase_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_snake_case : Optional[Any] = tf.config.list_logical_devices(device_type="CPU" )
_snake_case : Any = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_snake_case : List[str] = GradientAccumulator()
_snake_case : str = tf.Variable([4.0, 3.0] )
_snake_case ,_snake_case : Optional[int] = create_optimizer(5e-5 , 10 , 5 )
_snake_case : Any = tf.Variable([0.0, 0.0] , trainable=lowercase_ )
def accumulate_on_replica(lowercase_ ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(lowercase_ , lowercase_ ):
with strategy.scope():
_snake_case : str = strategy.experimental_local_results(lowercase_ )
local_variables[0].assign(lowercase_ )
local_variables[1].assign(lowercase_ )
strategy.run(lowercase_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(lowercase_ )
def _check_local_values(lowercase_ , lowercase_ ):
_snake_case : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , lowercase_ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , lowercase_ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 670 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
_snake_case : Optional[Any] = self.image_processor
_snake_case : Tuple = -10
_snake_case : str = self.image_processor.size["longest_edge"]
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# pop arguments that are not used in the forward pass but are used nevertheless
_snake_case : Any = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
_snake_case : int = original_sizes.numpy()
_snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_snake_case : Dict = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
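# __call__ first runs the wrapped image processor, then rescales any prompt
# coordinates (points / boxes) from each image's original size to the
# longest-edge-resized model input size before packing them into the batch.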
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : int = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
_snake_case : Any = np.array(lowercase_ )
if input_labels is not None:
_snake_case : Optional[Any] = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
_snake_case : Tuple = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : List[str] = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Tuple = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Dict = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
return coords
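# The rescaling above is a plain per-axis ratio, new = old * (new_dim /
# old_dim); boxes are temporarily viewed as (..., 2, 2) so their two corner
# points are rescaled exactly like point prompts.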
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.float32 ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ ) | 670 | 1 |
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
_snake_case : int = 0
_snake_case : int = 0
_snake_case : Any = knapsack(__lowercase , __lowercase , __lowercase , __lowercase , index + 1 )
if weights[index] <= max_weight:
_snake_case : str = values[index] + knapsack(
__lowercase , __lowercase , __lowercase , max_weight - weights[index] , index + 1 )
return max(__lowercase , __lowercase )
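# Worked example (hypothetical values): with weights [1, 2, 4, 5], values
# [5, 4, 8, 6] and max_weight 5, the optimum takes items 0 and 2 (weight
# 1 + 4 = 5) for a total value of 13.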
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_snake_case : Union[str, Any] = grid[0]
for row_n in range(1 , len(grid ) ):
_snake_case : Union[str, Any] = grid[row_n]
_snake_case : List[Any] = fill_row(__lowercase , __lowercase )
_snake_case : List[Any] = grid[row_n]
return grid[-1][-1]
def snake_case (__lowercase , __lowercase ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(current_row ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
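# Worked example (hypothetical grid): on [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# the cheapest monotone path is 1 -> 3 -> 1 -> 1 -> 1, so the min path sum
# returned is 7.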
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'glpn'
def __init__( self , lowercase_=3 , lowercase_=4 , lowercase_=[2, 2, 2, 2] , lowercase_=[8, 4, 2, 1] , lowercase_=[32, 64, 160, 256] , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[1, 2, 5, 8] , lowercase_=[4, 4, 4, 4] , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=64 , lowercase_=10 , lowercase_=-1 , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = num_encoder_blocks
_snake_case : Union[str, Any] = depths
_snake_case : Any = sr_ratios
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : Any = patch_sizes
_snake_case : Union[str, Any] = strides
_snake_case : Tuple = mlp_ratios
_snake_case : List[Any] = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[Any] = initializer_range
_snake_case : Any = drop_path_rate
_snake_case : List[Any] = layer_norm_eps
_snake_case : List[str] = decoder_hidden_size
_snake_case : Optional[Any] = max_depth
_snake_case : List[str] = head_in_index | 670 | import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(element )
elif element > pivot:
greater.append(element )
else:
equal.append(element )
return less, equal, greater
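# quick_select below uses this three-way partition to find the index-th
# smallest element in expected O(n) time: recurse into the smaller side when
# the target index precedes the pivot block, into the larger side otherwise.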
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(items ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(items ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(items , pivot )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | 1 |
import math
import unittest
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
assert isinstance(number , int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
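# The loop above exploits the fact that every prime p > 3 satisfies
# p % 6 in (1, 5), so only candidate divisors of the form 6k - 1 and
# 6k + 1 up to sqrt(number) need to be tested.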
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCamelCase ( self ):
with self.assertRaises(lowercase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main() | 670 | from math import pow, sqrt
def snake_case (*__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
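# Graham's law of effusion: rate_1 / rate_2 = sqrt(M_2 / M_1). Each helper
# below is this square-root relation solved for a different unknown,
# rounded to six decimal places.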
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
) | 670 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : Tuple = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_snake_case : Tuple = MaskFormerConfig(backbone_config=__lowercase )
_snake_case : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_snake_case : int = 847
_snake_case : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_snake_case : List[str] = 150
_snake_case : Any = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_snake_case : Dict = 171
_snake_case : Any = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_snake_case : Union[str, Any] = 133
_snake_case : List[str] = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_snake_case : Union[str, Any] = 19
_snake_case : Any = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_snake_case : Any = 65
_snake_case : str = "mapillary-vistas-id2label.json"
_snake_case : str = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_snake_case : Tuple = {int(k ): v for k, v in idalabel.items()}
return config
def snake_case (__lowercase ) -> Optional[int]:
'''simple docstring'''
_snake_case : List[str] = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def snake_case (__lowercase , __lowercase , __lowercase ) -> Any:
'''simple docstring'''
_snake_case : Union[str, Any] = dct.pop(__lowercase )
_snake_case : List[Any] = val
def snake_case (__lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
_snake_case : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_snake_case : Optional[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[:dim, :]
_snake_case : int = in_proj_bias[: dim]
_snake_case : Any = in_proj_weight[
dim : dim * 2, :
]
_snake_case : Dict = in_proj_bias[
dim : dim * 2
]
_snake_case : List[Any] = in_proj_weight[
-dim :, :
]
_snake_case : Any = in_proj_bias[-dim :]
# fmt: on
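# The original Swin checkpoints store query/key/value as one fused in_proj
# matrix of shape (3 * dim, dim); the slicing above splits it into the
# separate q / k / v weights and biases that the HF implementation expects.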
def snake_case (__lowercase , __lowercase ) -> int:
'''simple docstring'''
_snake_case : Dict = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_snake_case : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_snake_case : Any = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : List[Any] = in_proj_weight[: hidden_size, :]
_snake_case : Tuple = in_proj_bias[:config.hidden_size]
_snake_case : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
_snake_case : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case : Dict = in_proj_weight[-hidden_size :, :]
_snake_case : Union[str, Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_snake_case : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_snake_case : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: hidden_size, :]
_snake_case : Union[str, Any] = in_proj_bias[:config.hidden_size]
_snake_case : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_snake_case : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case : Optional[Any] = in_proj_weight[-hidden_size :, :]
_snake_case : str = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case () -> torch.Tensor:
'''simple docstring'''
_snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_snake_case : Optional[int] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
'''simple docstring'''
_snake_case : Tuple = get_maskformer_config(model_name )
# load original state_dict
with open(__lowercase , "rb" ) as f:
_snake_case : str = pickle.load(f )
_snake_case : List[str] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_snake_case : Optional[int] = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_swin_q_k_v(state_dict , config.backbone_config )
read_in_decoder_q_k_v(state_dict , config )
# update to torch tensors
for key, value in state_dict.items():
_snake_case : Tuple = torch.from_numpy(value )
# load 🤗 model
_snake_case : Any = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(name , param.shape )
_snake_case ,_snake_case : Any = model.load_state_dict(state_dict , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_snake_case : Dict = prepare_img()
if "vistas" in model_name:
_snake_case : Dict = 65
elif "cityscapes" in model_name:
_snake_case : int = 65_535
else:
_snake_case : Dict = 255
_snake_case : Any = True if "ade" in model_name else False
_snake_case : Any = MaskFormerImageProcessor(ignore_index=__lowercase , reduce_labels=__lowercase )
_snake_case : Optional[int] = image_processor(__lowercase , return_tensors="pt" )
_snake_case : Tuple = model(**__lowercase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_snake_case : Tuple = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 670 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ ) | 670 | 1 |
import torch
from transformers import AutoModel
class lowercase_ ( torch.nn.Module ):
def __init__( self , lowercase_="sayef/fsner-bert-base-uncased" ):
super(lowercase_ , self ).__init__()
_snake_case : List[Any] = AutoModel.from_pretrained(lowercase_ , return_dict=lowercase_ )
_snake_case : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
_snake_case : Tuple = torch.nn.Softmax(dim=1 )
def UpperCamelCase ( self , **lowercase_ ):
return self.bert(**lowercase_ ).last_hidden_state
def UpperCamelCase ( self , token_embeddings ):
return token_embeddings.sum(2 , keepdim=True )
def UpperCamelCase ( self , lowercase_ , lowercase_ , T=1 ):
return self.softmax(T * self.cos(lowercase_ , lowercase_ ) )
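# Token-level scoring: cosine similarity between query and support token
# embeddings, sharpened by the temperature T and normalized with a softmax.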
def UpperCamelCase ( self , W_query , W_supports ):
_snake_case : Union[str, Any] = W_supports["sizes"].tolist()
_snake_case : Optional[int] = W_supports["start_token_id"].item()
_snake_case : int = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_snake_case : int = self.BERT(**W_query )
_snake_case : int = self.BERT(**W_supports )
_snake_case : Any = None
_snake_case : List[Any] = None
_snake_case : Tuple = W_supports["input_ids"] == start_token_id
_snake_case : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(lowercase_ ):
if i == 0:
_snake_case : str = 0
else:
_snake_case : Optional[int] = support_sizes[i - 1]
_snake_case : Optional[int] = S[s : s + size][start_token_masks[s : s + size]]
_snake_case : Dict = S[s : s + size][end_token_masks[s : s + size]]
_snake_case : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_snake_case : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_snake_case : List[Any] = torch.vstack((p_starts, p_start) )
_snake_case : List[Any] = torch.vstack((p_ends, p_end) )
else:
_snake_case : Optional[int] = p_start
_snake_case : Optional[Any] = p_end
return p_starts, p_ends | 670 | from __future__ import annotations
from typing import TypedDict
class lowercase_ ( __snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
def snake_case (s ) -> list[str]:
'''simple docstring'''
if not isinstance(s , str ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(s ) )]
def snake_case (s ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(s , str ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case : List[str] = all_rotations(s )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
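# The BWT output is the last column of the sorted rotation matrix together
# with the row index of the original string: exactly the two pieces of
# information reverse_bwt needs to invert the transform.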
def snake_case (bwt_string , idx_original_string ) -> str:
'''simple docstring'''
if not isinstance(bwt_string , str ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case : Union[str, Any] = int(idx_original_string )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(bwt_string ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(bwt_string ) ):
for i in range(len(bwt_string ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
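# Inversion repeatedly prepends the BWT column to the partial rotations and
# re-sorts; after len(bwt_string) rounds, row idx_original_string holds the
# original text (a simple but quadratic reconstruction).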
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
_snake_case : Optional[int] = parent
_snake_case : str = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Optional[Any] = use_labels
_snake_case : Tuple = vocab_size
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Union[str, Any] = eos_token_id
_snake_case : str = pad_token_id
_snake_case : Any = bos_token_id
_snake_case : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase ( self ):
_snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
_snake_case : int = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
_snake_case : List[Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
_snake_case : Optional[Any] = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : int = inputs_dict["attention_mask"][:1, :]
_snake_case : int = 1
# first forward pass
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
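# The slice comparison above asserts that decoding with cached
# past_key_values reproduces, within rtol=1e-3, the logits obtained by
# re-running the concatenated sequence without a cache.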
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_snake_case : int = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_snake_case : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFLEDModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# TODO: Head-masking not yet implement
pass
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
return tf.constant(__lowercase , dtype=tf.int32 )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase_ ( tf.keras.layers.Layer ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1 , lowercase_=False , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : Dict = vocab_size
_snake_case : str = d_embed
_snake_case : Union[str, Any] = d_proj
_snake_case : str = cutoffs + [vocab_size]
_snake_case : Optional[Any] = [0] + self.cutoffs
_snake_case : Tuple = div_val
_snake_case : Optional[Any] = self.cutoffs[0]
_snake_case : Tuple = len(self.cutoffs ) - 1
_snake_case : Dict = self.shortlist_size + self.n_clusters
_snake_case : Union[str, Any] = keep_order
_snake_case : Optional[int] = []
_snake_case : Any = []
def UpperCamelCase ( self , lowercase_ ):
if self.n_clusters > 0:
_snake_case : Optional[int] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=lowercase_ , name="cluster_weight" )
_snake_case : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=lowercase_ , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case : int = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=lowercase_ , name=f"""out_projs_._{i}""" , )
self.out_projs.append(lowercase_ )
else:
self.out_projs.append(lowercase_ )
_snake_case : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=lowercase_ , name=f"""out_layers_._{i}_._weight""" , )
_snake_case : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=lowercase_ , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case ,_snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Optional[int] = self.d_embed // (self.div_val**i)
_snake_case : Optional[Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=lowercase_ , name=f"""out_projs_._{i}""" )
self.out_projs.append(lowercase_ )
_snake_case : Optional[Any] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=lowercase_ , name=f"""out_layers_._{i}_._weight""" , )
_snake_case : List[Any] = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=lowercase_ , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(lowercase_ )
@staticmethod
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ):
_snake_case : int = x
if proj is not None:
_snake_case : Optional[int] = tf.einsum("ibd,ed->ibe" , lowercase_ , lowercase_ )
return tf.einsum("ibd,nd->ibn" , lowercase_ , lowercase_ ) + b
@staticmethod
def UpperCamelCase ( lowercase_ , lowercase_ ):
_snake_case : List[str] = shape_list(lowercase_ )
_snake_case : List[Any] = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case : List[Any] = tf.stack([r, target] , 1 )
return tf.gather_nd(lowercase_ , lowercase_ )
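# The call below implements an adaptive softmax: with no clusters it reduces
# to a plain softmax over the full vocabulary; otherwise the head softmax
# covers the shortlist plus one logit per tail cluster, and each tail bucket
# adds its own log-softmax to the matching cluster log-probability.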
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=True , lowercase_=False ):
_snake_case : Union[str, Any] = 0
if self.n_clusters == 0:
_snake_case : Optional[Any] = self._logit(lowercase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case : int = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowercase_ , logits=lowercase_ )
_snake_case : str = tf.nn.log_softmax(lowercase_ , axis=-1 )
else:
_snake_case : List[str] = shape_list(lowercase_ )
_snake_case : Tuple = []
_snake_case : Optional[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case ,_snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case : Optional[int] = (target >= l_idx) & (target < r_idx)
_snake_case : List[Any] = tf.where(lowercase_ )
_snake_case : Dict = tf.boolean_mask(lowercase_ , lowercase_ ) - l_idx
if self.div_val == 1:
_snake_case : int = self.out_layers[0][0][l_idx:r_idx]
_snake_case : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case : Optional[Any] = self.out_layers[i][0]
_snake_case : Dict = self.out_layers[i][1]
if i == 0:
_snake_case : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case : Union[str, Any] = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case : List[str] = self._logit(lowercase_ , lowercase_ , lowercase_ , self.out_projs[0] )
_snake_case : List[str] = tf.nn.log_softmax(lowercase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case : Tuple = tf.boolean_mask(lowercase_ , lowercase_ )
_snake_case : Optional[int] = self._gather_logprob(lowercase_ , lowercase_ )
else:
_snake_case : int = self._logit(lowercase_ , lowercase_ , lowercase_ , self.out_projs[i] )
_snake_case : List[Any] = tf.nn.log_softmax(lowercase_ )
_snake_case : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case : Any = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowercase_ )
if target is not None:
_snake_case : int = tf.boolean_mask(lowercase_ , lowercase_ )
_snake_case : Dict = tf.boolean_mask(lowercase_ , lowercase_ )
_snake_case : str = self._gather_logprob(lowercase_ , lowercase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowercase_ , -cur_logprob , shape_list(lowercase_ ) )
_snake_case : Dict = tf.concat(lowercase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case : int = tf.reduce_mean(lowercase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowercase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference).
self.add_metric(lowercase_ , name=self.name , aggregation="mean" if return_mean else "" )
return out | 670 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
_snake_case : Optional[int] = parent
_snake_case : str = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Optional[Any] = use_labels
_snake_case : Tuple = vocab_size
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Union[str, Any] = eos_token_id
_snake_case : str = pad_token_id
_snake_case : Any = bos_token_id
_snake_case : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase ( self ):
_snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
_snake_case : int = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
_snake_case : List[Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
_snake_case : Optional[Any] = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : int = inputs_dict["attention_mask"][:1, :]
_snake_case : int = 1
# first forward pass
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention_mask
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
'''Assemble the full LED input dict, deriving default attention and head masks when they are not given.'''
if attention_mask is None:
_snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_snake_case : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
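# The tester above flags only the final token of each sequence for global
# attention; an equivalent standalone construction of that mask:
import tensorflow as tf

def last_token_global_mask(input_ids):
    # Zeros everywhere except a one on the last position of every sequence.
    return tf.concat(
        [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1
    )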
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFLEDModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# TODO: Head-masking not yet implemented
pass
def snake_case (__lowercase ) -> Optional[Any]:
'''Build an int32 `tf.constant` from a (nested) list of token ids.'''
return tf.constant(__lowercase , dtype=tf.int32 )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | 1 |
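# The two integration tests above pin a small deterministic slice of the model
# output against hard-coded reference values; a generic form of that check
# (the tolerances mirror the tests, the tensors are the caller's):
import tensorflow as tf

def assert_output_slice(output, expected_slice, atol=1e-3, rtol=1e-3):
    # Comparing only output[:, :3, :3] keeps the regression test cheap while
    # still catching numeric drift in the first positions and hidden dims.
    tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=atol, rtol=rtol)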
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Any = random.Random()
def snake_case (__lowercase , __lowercase=1.0 , __lowercase=None , __lowercase=None ) -> str:
'''Create a shape[0] x shape[1] nested list of random floats, each scaled by `scale`.'''
if rng is None:
_snake_case : List[str] = global_rng
_snake_case : Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase_ ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=400 , lowercase_=2_000 , lowercase_=10 , lowercase_=160 , lowercase_=8 , lowercase_=0.0 , lowercase_=4_000 , lowercase_=False , lowercase_=True , ):
_snake_case : str = parent
_snake_case : List[Any] = batch_size
_snake_case : List[Any] = min_seq_length
_snake_case : List[str] = max_seq_length
_snake_case : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case : Tuple = padding_value
_snake_case : List[str] = sampling_rate
_snake_case : Dict = return_attention_mask
_snake_case : Union[str, Any] = do_normalize
_snake_case : Union[str, Any] = feature_size
_snake_case : Tuple = chunk_length
_snake_case : Union[str, Any] = hop_length
def UpperCamelCase ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase ( self , lowercase_=False , lowercase_=False ):
def _flatten(lowercase_ ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
_snake_case : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case : Tuple = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
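# For reference, the zero-mean / unit-variance normalisation exercised by the
# last test in this file, as a standalone sketch (the library version also
# accepts an attention mask, which is omitted here):
import numpy as np

def zero_mean_unit_var(x, eps=1e-7):
    # Standardise so mean ~= 0 and variance ~= 1, as the test asserts.
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + eps)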
@require_torch
@require_torchaudio
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase ( self ):
_snake_case : List[Any] = WhisperFeatureExtractionTester(self )
def UpperCamelCase ( self ):
_snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : int = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
_snake_case : int = self.feature_extraction_class.from_pretrained(lowercase_ )
_snake_case : Tuple = feat_extract_first.to_dict()
_snake_case : str = feat_extract_second.to_dict()
_snake_case : Dict = feat_extract_first.mel_filters
_snake_case : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Union[str, Any] = os.path.join(lowercase_ , "feat_extract.json" )
feat_extract_first.to_json_file(lowercase_ )
_snake_case : List[Any] = self.feature_extraction_class.from_json_file(lowercase_ )
_snake_case : int = feat_extract_first.to_dict()
_snake_case : str = feat_extract_second.to_dict()
_snake_case : List[str] = feat_extract_first.mel_filters
_snake_case : Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_snake_case : Union[str, Any] = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
_snake_case : int = feature_extractor(lowercase_ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_snake_case : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_snake_case : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test batched
_snake_case : Any = feature_extractor(lowercase_ , return_tensors="np" ).input_features
_snake_case : Tuple = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case : int = np.asarray(lowercase_ )
_snake_case : Optional[Any] = feature_extractor(lowercase_ , return_tensors="np" ).input_features
_snake_case : Union[str, Any] = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test truncation required
_snake_case : Dict = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_snake_case : Tuple = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
_snake_case : List[Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
_snake_case : List[str] = [np.asarray(lowercase_ ) for speech_input in speech_inputs_truncated]
_snake_case : Dict = feature_extractor(lowercase_ , return_tensors="np" ).input_features
_snake_case : List[str] = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def UpperCamelCase ( self ):
import torch
_snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : Dict = np.random.rand(100 , 32 ).astype(np.float64 )
_snake_case : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case : Optional[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
_snake_case : Optional[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_snake_case : Dict = ds.sort("id" ).select(range(lowercase_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Dict = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
_snake_case : int = self._load_datasamples(1 )
_snake_case : Any = WhisperFeatureExtractor()
_snake_case : Union[str, Any] = feature_extractor(lowercase_ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowercase_ , atol=1e-4 ) )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case : Any = self._load_datasamples(1 )[0]
_snake_case : Optional[int] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show the issue
_snake_case : Tuple = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowercase_ )[0]
self.assertTrue(np.all(np.mean(lowercase_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase_ ) - 1 ) < 1e-3 ) ) | 670 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 1 |
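# Quick round trip with the same checkpoint used above (requires network
# access; the expected ids come from the "Hello World!" test earlier):
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tok.encode("Hello World!")   # -> [126, 32, 262, 152, 38, 72, 287]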
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE : Tuple = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['MobileNetV2FeatureExtractor']
__SCREAMING_SNAKE_CASE : Any = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetV2Config,
MobileNetV2OnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetV2ForImageClassification,
MobileNetV2ForSemanticSegmentation,
MobileNetV2Model,
MobileNetV2PreTrainedModel,
load_tf_weights_in_mobilenet_v2,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | 1 |
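# The last test checks that batch_decode is a thin delegation to the wrapped
# tokenizer; the pattern, reduced to its essentials (names are illustrative):
class _DelegatingProcessor:
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def batch_decode(self, *args, **kwargs):
        # Forward verbatim so processor and tokenizer outputs match exactly.
        return self.tokenizer.batch_decode(*args, **kwargs)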
def snake_case (__lowercase ) -> int:
'''Return the minimum difference between the sums of two subsets that partition `arr` (subset-sum DP); e.g. [1, 6, 11, 5] -> 1.'''
_snake_case : Optional[int] = len(__lowercase )
_snake_case : int = sum(__lowercase )
_snake_case : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_snake_case : Union[str, Any] = True
for i in range(1 , s + 1 ):
_snake_case : Tuple = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_snake_case : List[Any] = dp[i][j - 1]
if arr[i - 1] <= j:
_snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_snake_case : int = s - 2 * j
break
return diff | 670 | from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''Infer the pipeline data format from the file extension; empty paths default to "pipe".'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
'''Build a RunCommand (pipeline plus data reader) from parsed CLI arguments.'''
_snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_snake_case : Optional[int] = PipelineDataFormat.from_str(
format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__lowercase , __lowercase )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : str = nlp
_snake_case : str = reader
@staticmethod
def UpperCamelCase ( lowercase_ ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = self._nlp, []
for entry in self._reader:
_snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : str = self._reader.save_binary(lowercase_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowercase_ ) | 670 | 1 |
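# Example invocation of the command defined above, assuming the standard
# `transformers-cli` entry point (the file names are placeholders):
#
#   transformers-cli run --task text-classification \
#       --input data.csv --column text --format csv --output out.json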
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 670 | import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : List[str] = nn.ModuleList(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
_snake_case ,_snake_case : Optional[int] = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
_snake_case ,_snake_case : Tuple = down_samples, mid_sample
else:
_snake_case : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
_snake_case : Tuple = 0
_snake_case : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
_snake_case : int = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
_snake_case : List[str] = 0
_snake_case : Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case : Optional[Any] = pretrained_model_path
while os.path.isdir(lowercase_ ):
_snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
_snake_case : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(lowercase_ ) | 670 | 1 |
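# Round trip for the container above, following the fixed directory naming
# described in the comments of `from_pretrained` (class name per the
# MultiControlNetModel import earlier in this dump; paths are placeholders):
#
#   multi.save_pretrained("./mydirectory/controlnet")
#   # writes ./mydirectory/controlnet, ./mydirectory/controlnet_1, ...
#   multi = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")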
from __future__ import annotations
import math
def snake_case (__lowercase ) -> bool:
'''Primality test via 6k ± 1 trial division.'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case (__lowercase ) -> list[int]:
'''Return `n` together with all of its right- and left-truncations.'''
_snake_case : Dict = str(__lowercase )
_snake_case : Optional[int] = [n]
for i in range(1 , len(__lowercase ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
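# Worked example (3797 is the classic truncatable prime from the problem
# statement): list_truncated_nums(3797) -> [3797, 797, 379, 97, 37, 7, 3]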
def snake_case (__lowercase ) -> bool:
'''Cheap pre-filter: numbers longer than three digits must have prime leading and trailing three-digit chunks.'''
if len(str(__lowercase ) ) > 3:
if not is_prime(int(str(__lowercase )[-3:] ) ) or not is_prime(int(str(__lowercase )[:3] ) ):
return False
return True
def snake_case (__lowercase = 11 ) -> list[int]:
'''Collect the first `count` primes that stay prime under every left and right truncation.'''
_snake_case : list[int] = []
_snake_case : Optional[int] = 13
while len(__lowercase ) != count:
if validate(__lowercase ):
_snake_case : Optional[int] = list_truncated_nums(__lowercase )
if all(is_prime(__lowercase ) for i in list_nums ):
list_truncated_primes.append(__lowercase )
num += 2
return list_truncated_primes
def snake_case () -> int:
'''Project Euler 37: sum of the eleven truncatable primes.'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(1_1)) = }''') | 670 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'CLIPImageProcessor'
_lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : Dict = kwargs.pop("feature_extractor" )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
_snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | 1 |
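# `dict.fromkeys` above acts as an order-preserving de-duplicator when the
# two components share input names, e.g.:
#
#   list(dict.fromkeys(["input_ids", "attention_mask", "pixel_values",
#                       "attention_mask"]))
#   # -> ["input_ids", "attention_mask", "pixel_values"]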
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = '▁'
__SCREAMING_SNAKE_CASE : Dict = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
__SCREAMING_SNAKE_CASE : Any = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'facebook/m2m100_418M': 1_0_2_4,
}
# fmt: off
__SCREAMING_SNAKE_CASE : Optional[int] = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = ['input_ids', 'attention_mask']
_lowerCamelCase = []
_lowerCamelCase = []
def __init__( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<pad>" , lowercase_="<unk>" , lowercase_="m2m100" , lowercase_ = None , lowercase_=8 , **lowercase_ , ):
_snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
_snake_case : Optional[int] = language_codes
_snake_case : int = FAIRSEQ_LANGUAGE_CODES[language_codes]
_snake_case : List[Any] = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
_snake_case : int = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowercase_ )
for lang_code in fairseq_language_code
if self.get_lang_token(lowercase_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase_ , tgt_lang=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , language_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase_ , **lowercase_ , )
_snake_case : Union[str, Any] = vocab_file
_snake_case : Tuple = load_json(lowercase_ )
_snake_case : Dict = {v: k for k, v in self.encoder.items()}
_snake_case : List[Any] = spm_file
_snake_case : Optional[Any] = load_spm(lowercase_ , self.sp_model_kwargs )
_snake_case : Tuple = len(self.encoder )
_snake_case : Tuple = {
self.get_lang_token(lowercase_ ): self.encoder_size + i for i, lang_code in enumerate(lowercase_ )
}
_snake_case : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase_ )}
_snake_case : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_snake_case : int = src_lang if src_lang is not None else "en"
_snake_case : List[str] = tgt_lang
_snake_case : List[str] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_snake_case : str = num_madeup_words
@property
def UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self , lowercase_ ):
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowercase_ , self.encoder[self.unk_token] )
def UpperCamelCase ( self , lowercase_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowercase_ , self.unk_token )
    def UpperCamelCase ( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self ):
_snake_case : Optional[int] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_snake_case : int = self.__dict__.copy()
_snake_case : int = None
return state
def __setstate__( self , lowercase_ ):
_snake_case : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_snake_case : str = {}
_snake_case : Dict = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : List[str] = Path(lowercase_ )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
_snake_case : Dict = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_snake_case : Optional[int] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowercase_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase_ )
elif not os.path.isfile(self.spm_file ):
with open(lowercase_ , "wb" ) as fi:
_snake_case : Dict = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (str(lowercase_ ), str(lowercase_ ))
def UpperCamelCase ( self , lowercase_ , lowercase_ = "en" , lowercase_ = None , lowercase_ = "ro" , **lowercase_ , ):
_snake_case : List[Any] = src_lang
_snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_snake_case : str = src_lang
_snake_case : Dict = self(lowercase_ , add_special_tokens=lowercase_ , **lowercase_ )
_snake_case : Optional[Any] = self.get_lang_id(lowercase_ )
_snake_case : Tuple = tgt_lang_id
return inputs
def UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Optional[Any] = self.get_lang_token(lowercase_ )
_snake_case : Optional[int] = self.lang_token_to_id[lang_token]
_snake_case : Union[str, Any] = [self.cur_lang_id]
_snake_case : Optional[int] = [self.eos_token_id]
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Dict = self.get_lang_token(lowercase_ )
_snake_case : Union[str, Any] = self.lang_token_to_id[lang_token]
_snake_case : Dict = [self.cur_lang_id]
_snake_case : List[str] = [self.eos_token_id]
def UpperCamelCase ( self , lowercase_ ):
return self.lang_code_to_token[lang]
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Union[str, Any] = self.get_lang_token(lowercase_ )
return self.lang_token_to_id[lang_token]
def snake_case (__lowercase , __lowercase ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
_snake_case : str = sentencepiece.SentencePieceProcessor(**__lowercase )
spm.Load(str(__lowercase ) )
return spm
def snake_case (__lowercase ) -> Union[Dict, List]:
'''simple docstring'''
with open(__lowercase , "r" ) as f:
return json.load(__lowercase )
def snake_case (__lowercase , __lowercase ) -> None:
'''simple docstring'''
with open(__lowercase , "w" ) as f:
json.dump(__lowercase , __lowercase , indent=2 ) | 670 | from __future__ import annotations
def ohms_law(voltage , current , resistance ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
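# A quick usage sketch for the ohms_law helper above: pass exactly one zero
# and the function solves for that quantity via V = I * R. The expected
# outputs in the comments assume the definition above.
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}
print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}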
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = UnCLIPImageVariationPipeline
_lowerCamelCase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
_lowerCamelCase = IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
_lowerCamelCase = False
@property
def UpperCamelCase ( self ):
return 32
@property
def UpperCamelCase ( self ):
return 32
@property
def UpperCamelCase ( self ):
return self.time_input_dim
@property
def UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def UpperCamelCase ( self ):
return 100
@property
def UpperCamelCase ( self ):
_snake_case : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(lowercase_ )
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowercase_ )
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : str = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
_snake_case : List[str] = UnCLIPTextProjModel(**lowercase_ )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Union[str, Any] = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
_snake_case : List[str] = UNetaDConditionModel(**lowercase_ )
return model
@property
def UpperCamelCase ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
_snake_case : Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def UpperCamelCase ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_snake_case : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def UpperCamelCase ( self ):
_snake_case : List[str] = self.dummy_decoder
_snake_case : Dict = self.dummy_text_proj
_snake_case : Tuple = self.dummy_text_encoder
_snake_case : List[str] = self.dummy_tokenizer
_snake_case : List[str] = self.dummy_super_res_first
_snake_case : int = self.dummy_super_res_last
_snake_case : Any = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1_000 , )
_snake_case : Dict = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1_000 , )
_snake_case : int = CLIPImageProcessor(crop_size=32 , size=32 )
_snake_case : int = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def UpperCamelCase ( self , lowercase_ , lowercase_=0 , lowercase_=True ):
_snake_case : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith("mps" ):
_snake_case : Dict = torch.manual_seed(lowercase_ )
else:
_snake_case : int = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
if pil_image:
_snake_case : str = input_image * 0.5 + 0.5
_snake_case : Optional[Any] = input_image.clamp(0 , 1 )
_snake_case : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_snake_case : List[str] = DiffusionPipeline.numpy_to_pil(lowercase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def UpperCamelCase ( self ):
_snake_case : Optional[int] = "cpu"
_snake_case : Optional[Any] = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**lowercase_ )
_snake_case : Dict = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_snake_case : Any = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : Dict = pipe(**lowercase_ )
_snake_case : str = output.images
_snake_case : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : Union[str, Any] = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "cpu"
_snake_case : Union[str, Any] = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**lowercase_ )
_snake_case : Optional[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_snake_case : Optional[int] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : Optional[Any] = pipe(**lowercase_ )
_snake_case : Optional[int] = output.images
_snake_case : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : List[Any] = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
_snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Union[str, Any] = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[int] = "cpu"
_snake_case : Tuple = self.get_dummy_components()
_snake_case : Optional[int] = self.pipeline_class(**lowercase_ )
_snake_case : Dict = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_snake_case : int = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : Tuple = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
_snake_case : Any = pipe(**lowercase_ )
_snake_case : str = output.images
_snake_case : Dict = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : Tuple = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
_snake_case : int = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_snake_case : List[str] = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : int = torch.device("cpu" )
class lowercase_ :
_lowerCamelCase = 1
_snake_case : Any = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**lowercase_ )
_snake_case : Union[str, Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_snake_case : Optional[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
_snake_case : Optional[Any] = pipe.decoder.dtype
_snake_case : str = 1
_snake_case : List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_snake_case : Optional[int] = pipe.prepare_latents(
lowercase_ , dtype=lowercase_ , device=lowercase_ , generator=lowercase_ , latents=lowercase_ , scheduler=DummyScheduler() )
_snake_case : int = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_snake_case : Tuple = pipe.prepare_latents(
lowercase_ , dtype=lowercase_ , device=lowercase_ , generator=lowercase_ , latents=lowercase_ , scheduler=DummyScheduler() )
_snake_case : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
_snake_case : Optional[int] = pipe(
**lowercase_ , decoder_latents=lowercase_ , super_res_latents=lowercase_ ).images
_snake_case : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
# Don't pass image, instead pass embedding
_snake_case : Dict = pipeline_inputs.pop("image" )
_snake_case : str = pipe.image_encoder(lowercase_ ).image_embeds
_snake_case : List[Any] = pipe(
**lowercase_ , decoder_latents=lowercase_ , super_res_latents=lowercase_ , image_embeddings=lowercase_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def UpperCamelCase ( self ):
_snake_case : Tuple = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_snake_case : Any = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , expected_max_diff=lowercase_ )
@skip_mps
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = torch_device == "cpu"
_snake_case : Optional[Any] = True
_snake_case : Any = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , additional_params_copy_to_batched_inputs=lowercase_ , )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_snake_case : Optional[int] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowercase_ , additional_params_copy_to_batched_inputs=lowercase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowercase_ )
@skip_mps
def UpperCamelCase ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCamelCase ( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
_snake_case : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
_snake_case : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
_snake_case : Optional[int] = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
_snake_case : List[str] = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
_snake_case : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : List[str] = pipeline(
lowercase_ , generator=lowercase_ , output_type="np" , )
_snake_case : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ , 15 ) | 670 | import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects ):
    '''simple docstring'''
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception ) -> bool:
    '''simple docstring'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size(function = None , starting_batch_size = 128 ):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero." )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator | 670 | 1 |
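# A minimal sketch of the batch-size-halving decorator above in action,
# assuming the definitions above are importable. `train` is a hypothetical
# function: it raises a fake CUDA OOM until the batch size fits, so the
# wrapper retries with 64 -> 32 -> 16.
@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")  # matches an OOM pattern above
    return f"trained with batch_size={batch_size}"
print(train())  # trained with batch_size=16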
import requests
def send_slack_message(message_body , slack_url ) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>') | 670 | __SCREAMING_SNAKE_CASE : Union[str, Any] = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
__SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()}
def encode(word ) -> str:
    '''simple docstring'''
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded
def decode(coded ) -> str:
    '''simple docstring'''
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod() | 670 | 1 |
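# A round-trip sketch through the Baconian cipher above, assuming the
# encode/decode functions and substitution tables defined in that snippet.
secret = encode("hello world")
print(secret)          # one five-letter A/B group per input character
print(decode(secret))  # hello world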
def min_path_sum(grid ) -> int:
    '''simple docstring'''
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row , row_above ) -> list:
    '''simple docstring'''
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 1 |
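# The assertions above follow a common diffusion-test pattern: compare a tiny
# corner slice of the output image against hard-coded reference values. The
# same check in plain NumPy, with made-up numbers standing in for real model
# output:
import numpy as np
image = np.full((768, 768, 3), 0.44)              # pretend model output
output_slice = image[253:256, 253:256, -1].flatten()
expected_slice = np.full(9, 0.44)                 # pretend reference values
assert np.abs(output_slice - expected_slice).max() < 1e-2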
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal() -> None:
    '''simple docstring'''
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected ) | 670 | from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 670 | 1 |
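# A minimal Manim Community sketch of the layout idiom used above: build a
# row of memory blocks with VGroup(...).arrange(...) and caption it. This
# assumes the standard `construct` entry point rather than the renamed
# methods in the scene above.
from manim import DOWN, RIGHT, Rectangle, Scene, Text, VGroup
class MemoryRow(Scene):
    def construct(self):
        blocks = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(6)])
        blocks.arrange(RIGHT, buff=0)  # butt the squares together into a bar
        label = Text("CPU", font_size=24)
        label.next_to(blocks, DOWN, buff=0.5)
        self.add(blocks, label)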
from typing import Any
class Node:
    def __init__( self , data ):
        self.data = data
        self.next = None
class LinkedList:
    def __init__( self ):
        self.head = None
    def print_list( self ):
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push( self , new_data ):
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self , node_data_1 , node_data_2 ):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data , node_2.data = node_2.data , node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list() | 670 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'linear'
_lowerCamelCase = 'cosine'
_lowerCamelCase = 'cosine_with_restarts'
_lowerCamelCase = 'polynomial'
_lowerCamelCase = 'constant'
_lowerCamelCase = 'constant_with_warmup'
_lowerCamelCase = 'piecewise_constant'
def snake_case (__lowercase , __lowercase = -1 ) -> List[Any]:
'''simple docstring'''
return LambdaLR(__lowercase , lambda __lowercase : 1 , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1.0 , __lowercase ) )
return 1.0
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = step_rules.split("," )
for rule_str in rule_list[:-1]:
_snake_case ,_snake_case : str = rule_str.split(":" )
_snake_case : Dict = int(__lowercase )
_snake_case : List[str] = float(__lowercase )
_snake_case : Tuple = value
_snake_case : str = float(rule_list[-1] )
def create_rules_function(__lowercase , __lowercase ):
def rule_func(__lowercase ) -> float:
_snake_case : List[str] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_snake_case : int = create_rules_function(__lowercase , __lowercase )
return LambdaLR(__lowercase , __lowercase , last_epoch=__lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=-1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 0.5 , __lowercase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowercase ) * 2.0 * progress )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase = 1 , __lowercase = -1 ) -> Optional[int]:
'''simple docstring'''
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
_snake_case : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowercase ) * progress) % 1.0) )) )
return LambdaLR(__lowercase , __lowercase , __lowercase )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=1e-7 , __lowercase=1.0 , __lowercase=-1 ) -> List[Any]:
'''simple docstring'''
_snake_case : List[Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(__lowercase ) / float(max(1 , __lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_snake_case : Tuple = lr_init - lr_end
_snake_case : Any = num_training_steps - num_warmup_steps
_snake_case : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_snake_case : Optional[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowercase , __lowercase , __lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = 1 , __lowercase = 1.0 , __lowercase = -1 , ) -> List[Any]:
'''simple docstring'''
_snake_case : Any = SchedulerType(__lowercase )
_snake_case : Union[str, Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowercase , last_epoch=__lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowercase , step_rules=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowercase , num_warmup_steps=__lowercase , last_epoch=__lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , num_cycles=__lowercase , last_epoch=__lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , power=__lowercase , last_epoch=__lowercase , )
return schedule_func(
__lowercase , num_warmup_steps=__lowercase , num_training_steps=__lowercase , last_epoch=__lowercase ) | 670 | 1 |
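# The linear schedule above reduces to this multiplier: ramp from 0 to 1 over
# the warmup steps, then decay linearly back to 0. Shown standalone so the
# shape is easy to inspect without an optimizer.
def linear_multiplier(step, num_warmup_steps=10, num_training_steps=100):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))
for step in (0, 5, 10, 55, 100):
    print(step, round(linear_multiplier(step), 3))  # 0.0, 0.5, 1.0, 0.5, 0.0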
import numpy as np
def power_iteration(input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ) -> tuple[float, np.ndarray]:
    '''simple docstring'''
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration() -> None:
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration() | 670 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'roc_bert'
def __init__( self , lowercase_=30_522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_="absolute" , lowercase_=None , lowercase_=True , lowercase_=True , lowercase_=768 , lowercase_=910 , lowercase_=512 , lowercase_=24_858 , lowercase_=True , **lowercase_ , ):
_snake_case : int = vocab_size
_snake_case : Union[str, Any] = max_position_embeddings
_snake_case : Union[str, Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Union[str, Any] = initializer_range
_snake_case : List[Any] = type_vocab_size
_snake_case : int = layer_norm_eps
_snake_case : Optional[Any] = use_cache
_snake_case : List[Any] = enable_pronunciation
_snake_case : Dict = enable_shape
_snake_case : Dict = pronunciation_embed_dim
_snake_case : Tuple = pronunciation_vocab_size
_snake_case : Tuple = shape_embed_dim
_snake_case : List[str] = shape_vocab_size
_snake_case : Dict = concat_input
_snake_case : int = position_embedding_type
_snake_case : int = classifier_dropout
super().__init__(pad_token_id=lowercase_ , **lowercase_ ) | 670 | 1 |
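# Configs like RoCBertConfig above are essentially attribute containers:
# keyword arguments become attributes with defaults. A stripped-down sketch
# (not the transformers PretrainedConfig API):
class TinyConfig:
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        for key, value in kwargs.items():  # keep any extra settings too
            setattr(self, key, value)
config = TinyConfig(hidden_size=256, enable_shape=True)
print(config.hidden_size, config.enable_shape)  # 256 True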
def or_gate(input_1 , input_2 ) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
    '''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 670 | from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img ):
    '''simple docstring'''
    rows , cols = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(rows ):
        for j in range(cols ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread('image_data/lena.jpg', 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows() | 670 | 1 |
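# The pixel loop above can be replaced with a single vectorized NumPy
# expression; broadcasting applies 255 - value to every channel at once.
import numpy as np
img = np.array([[[10, 20, 30], [0, 0, 0]]], dtype=np.uint8)  # a 1x2 RGB image
negative = 255 - img
print(negative.tolist())  # [[[245, 235, 225], [255, 255, 255]]]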
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__SCREAMING_SNAKE_CASE : List[str] = 3
def snake_case (__lowercase ) -> int:
'''simple docstring'''
print("Generating primitive root of p" )
while True:
_snake_case : Optional[Any] = random.randrange(3 , __lowercase )
if pow(__lowercase , 2 , __lowercase ) == 1:
continue
if pow(__lowercase , __lowercase , __lowercase ) == 1:
continue
return g
def snake_case (__lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
_snake_case : List[str] = rabin_miller.generate_large_prime(__lowercase ) # select large prime number.
_snake_case : List[str] = primitive_root(__lowercase ) # one primitive root on modulo p.
_snake_case : Any = random.randrange(3 , __lowercase ) # private_key -> have to be greater than 2 for safety.
_snake_case : List[Any] = cryptomath.find_mod_inverse(pow(__lowercase , __lowercase , __lowercase ) , __lowercase )
_snake_case : Tuple = (key_size, e_a, e_a, p)
_snake_case : Union[str, Any] = (key_size, d)
return public_key, private_key
def snake_case (__lowercase , __lowercase ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_snake_case ,_snake_case : Union[str, Any] = generate_key(__lowercase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def snake_case () -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("elgamal" , 2_048 )
print("Key files generation successful" )
if __name__ == "__main__":
main() | 670 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
_lowerCamelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_lowerCamelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_lowerCamelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_lowerCamelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_lowerCamelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_lowerCamelCase = None
# Templates used to generate this protein (prediction-only)
_lowerCamelCase = None
# Chain corresponding to each parent
_lowerCamelCase = None
def snake_case (__lowercase ) -> Protein:
'''simple docstring'''
_snake_case : str = r"(\[[A-Z]+\]\n)"
_snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
_snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
_snake_case : List[str] = ["N", "CA", "C"]
_snake_case : Any = None
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_snake_case : Tuple = g[1][0].strip()
for i in range(len(__lowercase ) ):
if seq[i] not in residue_constants.restypes:
_snake_case : Tuple = "X" # FIXME: strings are immutable
_snake_case : int = np.array(
[residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
_snake_case : Dict = np.array(__lowercase )
_snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
_snake_case : Any = np.zeros(
(
len(__lowercase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowercase ):
_snake_case : Dict = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
if parents is None or len(__lowercase ) == 0:
_snake_case : Optional[int] = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[int] = pdb_str.split("\n" )
_snake_case : List[str] = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_snake_case : str = []
if prot.parents_chain_index is not None:
_snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowercase ) , [] )
parent_dict[str(__lowercase )].append(__lowercase )
_snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
parents_per_chain.append(__lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_snake_case : List[str] = [["N/A"]]
def make_parent_line(__lowercase ) -> str:
return F"""PARENT {' '.join(__lowercase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_snake_case : int = 0
for i, l in enumerate(__lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowercase ):
_snake_case : Tuple = parents_per_chain[chain_counter]
else:
_snake_case : str = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowercase ) )
return "\n".join(__lowercase )
def to_pdb (prot ) -> str:
'''simple docstring'''
restypes = residue_constants.restypes + ["X"]
def res_1to3(r ) -> str:
return residue_constants.restype_1to3.get(restypes[r] , "UNK" )
atom_types = residue_constants.atom_types
pdb_lines : List[str] = []
atom_mask = prot.atom_mask
aatype = prot.aatype
atom_positions = prot.atom_positions
residue_index = prot.residue_index.astype(np.int32 )
b_factors = prot.b_factors
chain_index = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
headers = get_pdb_headers(prot )
if len(headers ) > 0:
pdb_lines.extend(headers )
n = aatype.shape[0]
atom_index = 1
prev_chain_index = 0
chain_tags = string.ascii_uppercase
chain_tag = None
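# PDB chain IDs are single characters; chains are tagged A, B, C, ... according to chain_index below.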
# Add all atom sites.
for i in range(n ):
res_name_3 = res_1to3(aatype[i] )
for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_snake_case : List[Any] = "ATOM"
_snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
_snake_case : str = ""
_snake_case : str = ""
_snake_case : Any = 1.00
_snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
_snake_case : Dict = ""
_snake_case : Any = "A"
if chain_index is not None:
_snake_case : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
atom_line = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_3:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(atom_line )
atom_index += 1
should_terminate = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
should_terminate = True
prev_chain_index = chain_index[i + 1]
if should_terminate:
# Close the chain.
_snake_case : List[str] = "TER"
_snake_case : str = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(chain_termination_line )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowercase )
def ideal_atom_mask (prot ) -> np.ndarray:
'''simple docstring'''
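# Per-residue mask of the atoms that exist in each standard (idealized) residue type.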
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction (features , result , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
'''simple docstring'''
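# Assembles a Protein from model features and outputs; residue_index is shifted by 1 because PDB numbering is 1-based.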
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , ) | 670 | 1 |
def solution (length = 50 ) -> int:
'''simple docstring'''
ways_number = [1] * (length + 1)
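# Dynamic programming for Project Euler problem 114: ways_number[n] counts the
# fillings of a row of length n with red blocks of length >= 3 separated by at
# least one black square. Every placed block leaves an independent remainder of
# length row_length - block_start - block_length - 1, already counted above.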
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''') | 670 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
self.current_processor = self.image_processor
self.point_pad_value = -10
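# point_pad_value marks padding entries added so that every image in a batch carries the same number of point prompts.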
self.target_size = self.image_processor.size["longest_edge"]
def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors = None , **kwargs , ):
encoding_image_processor = self.image_processor(
images , return_tensors=return_tensors , **kwargs , )
# pop arguments that are not used in the forward but used nevertheless
original_sizes = encoding_image_processor["original_sizes"]
if hasattr(original_sizes , "numpy" ): # Checks if Torch or TF tensor
original_sizes = original_sizes.numpy()
input_points ,input_labels ,input_boxes = self._check_and_preprocess_points(
input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
encoding_image_processor = self._normalize_and_convert(
encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
return encoding_image_processor
def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , ):
if input_points is not None:
if len(input_points ) != len(original_sizes ):
input_points = [
self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
]
else:
input_points = [
self._normalize_coordinates(self.target_size , point , original_size )
for point, original_size in zip(input_points , original_sizes )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
input_points ,input_labels = self._pad_points_and_labels(input_points , input_labels )
input_points = np.array(input_points )
if input_labels is not None:
input_labels = np.array(input_labels )
if input_boxes is not None:
if len(input_boxes ) != len(original_sizes ):
input_boxes = [
self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
for box in input_boxes
]
else:
input_boxes = [
self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
for box, original_size in zip(input_boxes , original_sizes )
]
input_boxes = np.array(input_boxes )
if input_boxes is not None:
if return_tensors == "pt":
input_boxes = torch.from_numpy(input_boxes )
# boxes batch size of 1 by default
input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
input_boxes = tf.convert_to_tensor(input_boxes )
# boxes batch size of 1 by default
input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
input_points = torch.from_numpy(input_points )
# point batch size of 1 by default
input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
input_points = tf.convert_to_tensor(input_points )
# point batch size of 1 by default
input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
input_labels = torch.from_numpy(input_labels )
# point batch size of 1 by default
input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
input_labels = tf.convert_to_tensor(input_labels )
# point batch size of 1 by default
input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def _pad_points_and_labels( self , input_points , input_labels ):
expected_nb_points = max([point.shape[0] for point in input_points] )
processed_input_points = []
for i, point in enumerate(input_points ):
if point.shape[0] != expected_nb_points:
point = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(point )
input_points = processed_input_points
return input_points, input_labels
def _normalize_coordinates( self , target_size , coords , original_size , is_bounding_box=False ):
old_h ,old_w = original_size
new_h ,new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
coords = deepcopy(coords ).astype(float )
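# Rescale x by new_w / old_w and y by new_h / old_h to map coordinates from the original image onto the resized one.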
if is_bounding_box:
coords = coords.reshape(-1 , 2 , 2 )
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1 , 4 )
return coords
def _check_and_preprocess_points( self , input_points=None , input_labels=None , input_boxes=None , ):
if input_points is not None:
if hasattr(input_points , "numpy" ): # Checks for TF or Torch tensor
input_points = input_points.numpy().tolist()
if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
raise ValueError("Input points must be a list of list of floating points." )
input_points = [np.array(input_point ) for input_point in input_points]
else:
input_points = None
if input_labels is not None:
if hasattr(input_labels , "numpy" ):
input_labels = input_labels.numpy().tolist()
if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
raise ValueError("Input labels must be a list of list of integers." )
input_labels = [np.array(label ) for label in input_labels]
else:
input_labels = None
if input_boxes is not None:
if hasattr(input_boxes , "numpy" ):
input_boxes = input_boxes.numpy().tolist()
if (
not isinstance(input_boxes , list )
or not isinstance(input_boxes[0] , list )
or not isinstance(input_boxes[0][0] , list )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
else:
input_boxes = None
return input_points, input_labels, input_boxes
@property
def model_input_names( self ):
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(image_processor_input_names ) )
def post_process_masks( self , *args , **kwargs ):
return self.image_processor.post_process_masks(*args , **kwargs ) | 670 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy-config.json')
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : List[str] = 0
def UpperCamelCase ( self ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def UpperCamelCase ( self ):
config = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(config , BertConfig )
def UpperCamelCase ( self ):
config = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(config , RobertaConfig )
def UpperCamelCase ( self ):
config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(config , RobertaConfig )
def UpperCamelCase ( self ):
config = AutoConfig.for_model("roberta" )
self.assertIsInstance(config , RobertaConfig )
def UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
folder = os.path.join(tmp_dir , "fake-roberta" )
os.makedirs(folder , exist_ok=True )
with open(os.path.join(folder , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
config = AutoConfig.from_pretrained(folder )
self.assertEqual(type(config ) , RobertaConfig )
def UpperCamelCase ( self ):
try:
AutoConfig.register("custom" , lowercase_ )
# Wrong model type will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register("model" , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register("bert" , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
config = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir )
new_config = AutoConfig.from_pretrained(tmp_dir )
self.assertIsInstance(new_config , CustomConfig )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
_snake_case : Optional[int] = AutoConfig.from_pretrained("bert-base" )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_snake_case : Optional[int] = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
EnvironmentError , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
_snake_case : Any = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def UpperCamelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError ):
_snake_case : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError ):
_snake_case : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=False )
config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=True )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir )
reloaded_config = AutoConfig.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def UpperCamelCase ( self ):
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'new-model'
try:
AutoConfig.register("new-model" , lowercase_ )
# If remote code is not set, the default is to use local
config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=False )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=True )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"] | 670 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
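# In-place dynamic programming: each cell accumulates the cheapest cost of
# reaching it moving only right or down, so the minimum path sum ends up in
# grid[-1][-1].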
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
row_above = grid[0]
for row_n in range(1 , len(grid ) ):
current_row = grid[row_n]
grid[row_n] = fill_row(current_row , row_above )
row_above = grid[row_n]
return grid[-1][-1]
def fill_row (current_row , row_above ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(current_row ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 1 |
from __future__ import annotations
def ohms_law (voltage , current , resistance ) -> dict[str, float]:
'''simple docstring'''
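# Ohm's law: V = I * R. Exactly one argument is passed as 0 to mark the unknown, which is then solved from the other two.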
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | import random
def _partition (data , pivot ) -> tuple:
'''simple docstring'''
less, equal, greater = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def quick_select (items , index ):
'''simple docstring'''
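# Quickselect returns the element of zero-based rank `index` in expected O(n)
# time, e.g. quick_select([2, 4, 5, 7, 899, 54, 32], 5) == 54.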
if index >= len(items ) or index < 0:
return None
pivot = items[random.randint(0 , len(items ) - 1 )]
count = 0
smaller, equal, larger = _partition(items , pivot )
count = len(equal )
m = len(smaller )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(smaller , index )
# must be in larger
else:
return quick_select(larger , index - (m + count) ) | 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : str = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure) | 670 | from math import pow, sqrt
def validate (*values ) -> bool:
'''simple docstring'''
result = len(values ) > 0 and all(value > 0.0 for value in values )
return result
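# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# The helpers below solve this relation for each quantity in turn; the _1/_2
# suffixes follow that convention.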
def effusion_ratio (molar_mass_1 , molar_mass_2 ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
if validate(molar_mass_1 , molar_mass_2 )
else ValueError("Input Error: Molar mass values must be greater than 0." )
)
def first_effusion_rate (effusion_rate , molar_mass_1 , molar_mass_2 ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def second_effusion_rate (effusion_rate , molar_mass_1 , molar_mass_2 ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def first_molar_mass (molar_mass , effusion_rate_1 , effusion_rate_2 ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def second_molar_mass (molar_mass , effusion_rate_1 , effusion_rate_2 ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
else ValueError(
"Input Error: Molar mass and effusion rate values must be greater than 0." )
) | 670 | 1 |
def search (list_data , key , left = 0 , right = 0 ) -> int:
'''simple docstring'''
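# Two-ended linear search: compare both ends of the current window, then recurse on the inner slice; -1 signals that the key is absent.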
right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor ( LayoutLMv2ImageProcessor ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ ) | 670 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def snake_case (__lowercase ) -> list[list[float]]:
'''simple docstring'''
d = Decimal
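# Decimal keeps the determinant and cofactor arithmetic exact before the final conversion back to float.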
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
determinant = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
swapped_matrix[0][0] ,swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
swapped_matrix[1][0] ,swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
cofactor_matrix[0][1] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
cofactor_matrix[1][0] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
cofactor_matrix[1][2] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
cofactor_matrix[2][1] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
adjoint_matrix = array(cofactor_matrix )
for i in range(3 ):
for j in range(3 ):
adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
inverse_matrix = array(adjoint_matrix )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." ) | 670 | from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations (s ) -> list[str]:
'''simple docstring'''
if not isinstance(s , str ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform (s ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(s , str ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
rotations = all_rotations(s )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
response : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(s ),
}
return response
def reverse_bwt (bwt_string , idx_original_string ) -> str:
'''simple docstring'''
if not isinstance(bwt_string , str ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
idx_original_string = int(idx_original_string )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(bwt_string ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(bwt_string ) ):
for i in range(len(bwt_string ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | 1 |
def is_palindrome (num ) -> bool:
'''simple docstring'''
return str(num ) == str(num )[::-1]
def sum_reverse (num ) -> int:
'''simple docstring'''
return int(num ) + int(str(num )[::-1] )
def solution (limit = 10_000 ) -> int:
'''simple docstring'''
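# Project Euler problem 55: a number is taken to be a Lychrel number if fifty
# reverse-and-add iterations never produce a palindrome.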
lychrel_nums = []
for num in range(1 , limit ):
iterations = 0
a = num
while iterations < 50:
a = sum_reverse(a )
iterations += 1
if is_palindrome(a ):
break
else:
lychrel_nums.append(num )
return len(lychrel_nums )
if __name__ == "__main__":
print(F'''{solution() = }''') | 670 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 1 |