code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the `accelerate config` argument parser with its sub-commands.

    Args:
        subparsers: optional subparsers action of a parent CLI to attach the
            config command to; ``None`` builds a standalone parser.

    Returns:
        The fully assembled config parser.
    """
    # Shared parent parser: help disabled so it can be embedded into child
    # parsers without a duplicate -h, abbreviations off for stable flags.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    """Entry point for the `accelerate config` command line interface."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # argparse only sets `func` when a valid subcommand was selected;
    # without one, show usage and exit non-zero.
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 43
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score *item* by how many characters match *main_target* position-wise.

    Args:
        item: candidate string (same length as the target).
        main_target: string the population is evolving towards.

    Returns:
        The candidate together with its score as a float.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails.

    Fix: the original built both children from the same parent, so crossover
    never actually mixed genes.

    Returns:
        Two children, each a prefix of one parent plus the suffix of the other.
    """
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene of *child* with probability MUTATION_PROBABILITY.

    Fix: the original assigned the random gene to a throwaway variable
    instead of writing it back into the child, so mutation never happened.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed children from *parent_1* and random mates among the best scorers.

    Args:
        parent_1: (string, normalized score) of the selected parent.
        population_score: scored population, sorted best-first.
        genes: alphabet used for mutation.

    Returns:
        The newly bred (and possibly mutated) children.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # Pick the second parent among the N_SELECTED best of this generation.
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until *target* is reproduced exactly.

    Args:
        target: string the algorithm must converge to.
        genes: alphabet the population may draw characters from.
        debug: when True, log the best candidate every 10 generations.

    Returns:
        (generation, total_population, best_string) at convergence.

    Raises:
        ValueError: if N_SELECTED exceeds N_POPULATION, or *target* contains
            characters missing from *genes*.
    """
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    # Run the evolution and report how much work it took.
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 43
| 1
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix filled with 1..row_size**2 in row-major order.

    Negative sizes use their absolute value; zero falls back to the
    default of 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise (90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of the matrix (rows become columns)."""
    matrix = [list(row) for row in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its rows in reverse order."""
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed (columns mirrored)."""
    matrix = [row[::-1] for row in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    """Print each row on its own line, values space-separated."""
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    # Rebuild the matrix each time: transpose/reverse helpers may return
    # new lists, so start each demo from a fresh origin.
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 43
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Per-device batch-size caps used by the dataloader helpers.
# NOTE(review): names restored from the upstream accelerate example scripts —
# both values were bound to the same mangled name, clobbering the first.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Create GLUE/MRPC train and eval dataloaders tokenized for *model_name*.

    Args:
        accelerator: Accelerator instance, used to pick TPU-specific padding.
        batch_size (int): per-device batch size for both splits.
        model_name (str): checkpoint used to load the tokenizer.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate a sequence-classification model on GLUE/MRPC.

    Args:
        config (dict): hyper-parameters: "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace with model_name_or_path, output_dir,
            performance_lower_bound and num_epochs.

    Side effects:
        Writes per-epoch accuracies to <output_dir>/all_results.json on the
        main process; asserts the best accuracy stays above
        args.performance_lower_bound when that bound is set.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: real AdamW unless the DeepSpeed config supplies one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    # Drop the duplicate samples gather() padded into the last batch.
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # NOTE(review): key format restored from the upstream accelerate
        # performance test script — the dict written to all_results.json below.
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 43
| 1
|
# Lazy-import initializer for the LayoutLMv2 model family.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Map of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

# Optional exports, registered only when their dependency is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; at runtime the lazy module is used.
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; used by DeformableDetrConfig below.
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """Configuration class for Deformable DETR models.

    Stores the architecture, backbone, deformable-attention, matcher and
    loss hyper-parameters. Defaults reproduce the values visible in the
    original signature.
    """

    model_type = "deformable_detr"
    # Aliases so generic code can read hidden_size / num_attention_heads.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone_config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the backbone config object from its serialized dict.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias required by the attribute_map contract.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by the attribute_map contract.
        return self.d_model

    def UpperCamelCase__(self):
        """Serialize this config to a plain dict, expanding the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 43
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for the TF MobileBERT heads and pipelines."""

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra label MobileBERT pretraining expects when requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # Pretraining additionally needs a next-sentence label; the
                # original assigned this tensor to a throwaway local.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_mask=True,
    use_token_type_ids=True,
    use_labels=True,
    vocab_size=99,
    hidden_size=32,
    embedding_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    scope=None,
):
    """Store the hyper-parameters used to build tiny MobileBERT test configs."""
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
    self.embedding_size = embedding_size
def prepare_config_and_inputs(self):
    """Build a tiny MobileBertConfig plus random inputs, masks and labels."""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = MobileBertConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_range=self.initializer_range,
        embedding_size=self.embedding_size,
    )

    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_mobilebert_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check output shapes of the base model for dict, list and tensor inputs."""
    model = TFMobileBertModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)

    self.parent.assertEqual(
        result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
    )
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_mobilebert_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the masked-LM head emits vocabulary-sized logits per token."""
    model = TFMobileBertForMaskedLM(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_mobilebert_for_next_sequence_prediction(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the next-sentence head emits one binary logit pair per example."""
    model = TFMobileBertForNextSentencePrediction(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check both pretraining heads: MLM logits and NSP relationship logits."""
    model = TFMobileBertForPreTraining(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(
        result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
    )
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Any = self.num_labels
__UpperCamelCase :Dict = TFMobileBertForSequenceClassification(config=__lowercase)
__UpperCamelCase :Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :List[Any] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :Union[str, Any] = self.num_choices
__UpperCamelCase :Optional[Any] = TFMobileBertForMultipleChoice(config=__lowercase)
__UpperCamelCase :Dict = tf.tile(tf.expand_dims(__lowercase , 1) , (1, self.num_choices, 1))
__UpperCamelCase :Union[str, Any] = tf.tile(tf.expand_dims(__lowercase , 1) , (1, self.num_choices, 1))
__UpperCamelCase :Union[str, Any] = tf.tile(tf.expand_dims(__lowercase , 1) , (1, self.num_choices, 1))
__UpperCamelCase :Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__UpperCamelCase :Optional[int] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Tuple:
__UpperCamelCase :Optional[int] = self.num_labels
__UpperCamelCase :Optional[int] = TFMobileBertForTokenClassification(config=__lowercase)
__UpperCamelCase :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :int = TFMobileBertForQuestionAnswering(config=__lowercase)
__UpperCamelCase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Any = model(__lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :List[str] = config_and_inputs
__UpperCamelCase :str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
__UpperCamelCase :str = ConfigTester(self , config_class=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowercase)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowercase)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowercase)
@slow
def UpperCamelCase__ ( self) -> List[Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__UpperCamelCase :List[str] = TFMobileBertModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released ``google/mobilebert-uncased`` weights."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        # reference logits recorded from the released checkpoint
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 43
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model.

    Encodes the text against one "This example is {label}" hypothesis per label
    and picks the label whose entailment logit is largest.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load model/tokenizer, then locate the 'entailment' label index in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs, one per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the label whose pair scored the highest entailment logit."""
        logits = outputs.logits
        # use the entailment index computed in setup() instead of a hard-coded column
        label_id = torch.argmax(logits[:, self.entailment_id]).item()
        return self._labels[label_id]
| 43
| 1
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Adjacency-list directed graph; each edge is stored as ``[weight, destination]``."""

    def __init__(self):
        # maps node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge ``u -> v`` with weight ``w``; duplicates are ignored and the
        destination is registered as a node even if it has no outgoing edges."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every node in the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge ``u -> v`` if it exists (any weight)."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search starting at ``s`` (first inserted node when
        -2); stops early when destination ``d`` is found (never, when -1).
        Returns the nodes in visiting order."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated node
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all children visited -> backtrack, otherwise advance to the new child
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # back at the starting point with nothing left to explore
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate with ``c`` nodes (random 10..10009 when -1) and random edges."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s``; returns nodes in visiting order."""
        queue = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        queue.append(s)
        visited.append(s)
        while queue:
            s = queue.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        queue.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological order of the component reachable from ``s``
        (nodes are emitted as their subtrees complete)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the nodes found on back edges (members of cycles), or [] if none."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge: everything on the stack down to node[1] is in a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # backtrack when no unvisited child was found, otherwise advance
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back edge is found, False for an acyclic graph."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """Adjacency-list undirected graph; each edge is stored in both directions
    as ``[weight, neighbour]``."""

    def __init__(self):
        # maps node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add undirected edge ``u -- v`` with weight ``w``; duplicates are ignored."""
        # check if u exists
        if self.graph.get(u):
            # avoid duplicating an existing edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # add the other direction
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove edge ``u -- v`` (both directions) if it exists."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (first inserted node when -2);
        stops early when destination ``d`` is found (never, when -1)."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all children visited -> backtrack, otherwise advance to the new child
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate with ``c`` nodes (random 10..10009 when -1) and random edges."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s``; returns nodes in visiting order."""
        queue = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        queue.append(s)
        visited.append(s)
        while queue:
            s = queue.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        queue.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the nodes found on back edges (members of cycles), or [] if none."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge: everything on the stack down to node[1] is in a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back edge is found, False for an acyclic graph."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every node in the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 43
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU-sized) tests for the stable unCLIP image-to-image pipeline."""

    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a tiny, randomly initialized set of pipeline components."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            # NOTE(review): these two boolean flags were lost in source mangling;
            # True matches the upstream diffusers test — confirm.
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Build deterministic pipeline inputs; optionally converts the image to PIL."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        # exact-output comparison is only stable on cpu/mps
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released stable-unclip checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect IGF (context, information-gain) pairs and dump them to ``igf_data_file``.

    Loads a pretrained GPT-2, measures its perplexity on an objective set drawn from
    ``data_file``, then records per-context gains via ``collect_objective_set``.
    """
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=min_len, trim=trim
    )  # fix: min_len was hard-coded to 1026, ignoring the parameter
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=15 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=100 , SCREAMING_SNAKE_CASE="igf_model.pt" , ):
    '''Train the IGF secondary learner on collected (context, IG) pairs.

    Builds a ``SecondaryLearner`` on top of GPT-2 embedding weights, trains it
    with ``train_secondary_learner``, frees GPU memory, and returns the
    trained learner.

    NOTE(review): positional parameters appear to be the secondary-learner
    training data, max_epochs, batch_size, eval_freq and igf_model_path --
    confirm against the call site in ``main``.
    '''
    set_seed(42 )
    # Load pre-trained model
    __UpperCamelCase :str = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    __UpperCamelCase :List[str] = SecondaryLearner(SCREAMING_SNAKE_CASE )
    # Train secondary learner
    __UpperCamelCase :Tuple = train_secondary_learner(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_epochs=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=SCREAMING_SNAKE_CASE , )
    # Free the base model and training data before returning.
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=1_000 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=recopy_gpta , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , ):
    '''Fine-tune GPT-2 with Information Gain Filtration (IGF).

    Iterates over training examples, lets the (optional) secondary learner
    predict the information gain of each sampled context, filters contexts by
    a threshold that decays after 10 global steps, accumulates gradients over
    a batch of accepted contexts, and periodically reports test perplexity.
    The fine-tuned state dict is saved to disk and the model is returned.

    NOTE(review): positional parameters appear to be model, train_dataset,
    test_dataset, context_len, max_steps, batch_size, threshold, recopy_model,
    secondary_learner, eval_interval, finetuned_model_name -- confirm against
    the call site in ``main``.
    '''
    __UpperCamelCase :List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    __UpperCamelCase :Tuple = RandomSampler(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE )
    # Number of epochs needed to reach max_steps with this loader length.
    __UpperCamelCase :List[Any] = max_steps // (len(SCREAMING_SNAKE_CASE )) + 1
    __UpperCamelCase :Optional[int] = 0
    __UpperCamelCase :int = torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
    # Reset model/optimizer/scheduler to the original pretrained weights.
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = recopy_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(SCREAMING_SNAKE_CASE )
        secondary_learner.eval()
    __UpperCamelCase :List[str] = []
    __UpperCamelCase :str = 0
    __UpperCamelCase :int = []
    __UpperCamelCase :int = []
    # Compute the performance of the transformer model at the beginning
    __UpperCamelCase :List[str] = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    test_perps.append(SCREAMING_SNAKE_CASE )
    print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE )
    for epoch in range(int(SCREAMING_SNAKE_CASE ) ):
        for step, example in enumerate(SCREAMING_SNAKE_CASE ):
            torch.cuda.empty_cache()
            # Sample a random window of context_len tokens from the example.
            __UpperCamelCase :Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
            __UpperCamelCase :Tuple = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            __UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
            __UpperCamelCase :Any = True
            if secondary_learner is not None:
                __UpperCamelCase :List[Any] = secondary_learner.forward(
                    torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(SCREAMING_SNAKE_CASE ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    __UpperCamelCase :List[Any] = -1
                if predicted_q < threshold:
                    __UpperCamelCase :List[str] = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                __UpperCamelCase :int = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                __UpperCamelCase :Any = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    __UpperCamelCase :Tuple = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                    test_perps.append(SCREAMING_SNAKE_CASE )
                    print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def lowerCamelCase ( ):
    '''CLI entry point for the IGF fine-tuning pipeline.

    Defines (currently unused) command-line arguments, then runs the three
    stages with hard-coded values: collect (context, IG) pairs, train the
    secondary learner, and fine-tune GPT-2 with IGF.

    NOTE(review): the parsed arguments are never read below -- all stage
    parameters are hard-coded; verify whether wiring args through was
    intended.
    '''
    __UpperCamelCase :List[str] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A seed for reproducible training.''' )
    parser.add_argument(
        '''--context_len''' , default=32 , type=SCREAMING_SNAKE_CASE , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=1_000 , type=SCREAMING_SNAKE_CASE , help='''To calculate training epochs''' )
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=128 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=16 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data of language model(gpt2) ''' )
    parser.add_argument(
        '''--eval_interval''' , default=10 , type=SCREAMING_SNAKE_CASE , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''The number of examples split to be used as objective_set/test_data''' )
    parser.add_argument(
        '''--min_len''' , default=1_026 , type=SCREAMING_SNAKE_CASE , help='''The minimum length of the article to be used as objective set''' )
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=15 , type=SCREAMING_SNAKE_CASE , help='''number of epochs to train secondary learner''' )
    parser.add_argument('''--trim''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''truncate the example if it exceeds context length''' )
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=SCREAMING_SNAKE_CASE , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=SCREAMING_SNAKE_CASE , help='''finetuned_model_name''' )
    parser.add_argument(
        '''--recopy_model''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=SCREAMING_SNAKE_CASE , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
    # Load train data for secondary learner
    __UpperCamelCase :Optional[Any] = joblib.load('''data/IGF_values.jbl''' )
    # Train secondary learner
    __UpperCamelCase :str = training_secondary_learner(
        SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
    # load pretrained gpt2 model
    __UpperCamelCase :Union[str, Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    __UpperCamelCase , __UpperCamelCase :Dict = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=SCREAMING_SNAKE_CASE )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE , secondary_learner=SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
    main()
| 43
|
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the dominant eigenvalue/eigenvector of a matrix by power iteration.

    Repeatedly applies ``input_matrix`` to ``vector``, renormalizing each
    step, until the Rayleigh-quotient eigenvalue estimate changes by less
    than ``error_tol`` (relative) or ``max_iterations`` is reached.

    Args:
        input_matrix: square matrix; if complex it must be Hermitian so the
            dominant eigenvalue is real.
        vector: initial guess; must not be orthogonal to the dominant
            eigenvector for convergence to that eigenpair.
        error_tol: relative change in the eigenvalue at which to stop.
        max_iterations: hard cap on the number of iterations.

    Returns:
        ``(eigenvalue, eigenvector)`` — the largest-magnitude eigenvalue and
        its (unit-norm) eigenvector.

    Fixes vs. original: the obfuscated signature repeated the same parameter
    name (a SyntaxError) and the function name did not match the in-file call
    site ``power_iteration(...)``.
    """
    # Matrix must be square and conformable with the starting vector.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # A complex input_matrix must be Hermitian (real eigenvalues).
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the current vector and renormalize.
        w = np.dot(input_matrix, vector)
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (cheap here because the vector is unit-norm).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Relative change of the eigenvalue estimate between iterations.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # Hermitian => eigenvalue is real; drop the zero imaginary part.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Check ``power_iteration`` against ``np.linalg.eigh`` on a real
    symmetric and a complex Hermitian 3x3 matrix.

    Fixes vs. original: the obfuscated def name did not match the call in the
    ``__main__`` guard, and the dtype was mangled to the nonexistent
    ``np.complexaaa`` (should be ``np.complex128``).
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian complex matrix: real part plus an antisymmetric
    # imaginary upper/lower triangle.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy reference: eigh handles symmetric/Hermitian matrices and
        # returns eigenvalues in ascending order.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Compare absolute values element-wise, as eigenvectors are only
        # unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_power_iteration()
| 43
| 1
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger and a fixed seed matching suno-ai/bark's reference setup.
# NOTE: obfuscation rebinds the single name ``__lowercase`` repeatedly; each
# assignment below originally had its own constant name.
__lowercase = logging.get_logger(__name__)
set_seed(770)
# Mapping from bark checkpoint layer-name fragments to the HF Bark names.
__lowercase = {
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}
# Hub locations of the original bark checkpoints (small and full variants).
__lowercase = {
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}
# Local cache layout: defaults to ~/.cache/suno/bark_v0 unless XDG_CACHE_HOME
# is set.
__lowercase = os.path.dirname(os.path.abspath(__file__))
__lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
__lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
    '''Return the local checkpoint path for a bark model type.

    Appends ``_small`` to the key when the small variant is requested, then
    joins the remote file name onto the cache directory.

    NOTE(review): the first positional argument is used both as the
    ``model_type`` key and as the directory joined below -- obfuscation
    collapsed two distinct names (likely ``model_type`` and ``CACHE_DIR``);
    confirm against the upstream conversion script.
    '''
    __UpperCamelCase :str = model_type
    if use_small:
        key += "_small"
    return os.path.join(SCREAMING_SNAKE_CASE , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''Download a checkpoint file from the Hugging Face Hub into the cache.

    Creates the destination directory if needed, then fetches the file with
    ``hf_hub_download``. Positional arguments are by position the repo id and
    the file name.
    '''
    os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
    hf_hub_download(repo_id=SCREAMING_SNAKE_CASE , filename=SCREAMING_SNAKE_CASE , local_dir=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ):
    '''Load a bark sub-model checkpoint into the matching HF Bark module.

    Selects the (model, config, generation-config) classes for the requested
    ``model_type`` (text/coarse/fine), downloads the checkpoint if it is not
    cached, remaps the original state-dict keys to HF names, validates that
    no keys are missing or extra (ignoring ``.attn.bias`` buffers), and
    returns the loaded model in eval mode on the target device.
    '''
    if model_type == "text":
        __UpperCamelCase :Tuple = BarkSemanticModel
        __UpperCamelCase :List[str] = BarkSemanticConfig
        __UpperCamelCase :Any = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        __UpperCamelCase :int = BarkCoarseModel
        __UpperCamelCase :Optional[Any] = BarkCoarseConfig
        __UpperCamelCase :Tuple = BarkCoarseGenerationConfig
    elif model_type == "fine":
        __UpperCamelCase :Any = BarkFineModel
        __UpperCamelCase :Union[str, Any] = BarkFineConfig
        __UpperCamelCase :Any = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    __UpperCamelCase :Union[str, Any] = f"""{model_type}_small""" if use_small else model_type
    __UpperCamelCase :Tuple = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(SCREAMING_SNAKE_CASE ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info['''repo_id'''] , model_info['''file_name'''] )
    __UpperCamelCase :List[str] = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
    # this is a hack
    __UpperCamelCase :str = checkpoint['''model_args''']
    if "input_vocab_size" not in model_args:
        __UpperCamelCase :Dict = model_args['''vocab_size''']
        __UpperCamelCase :Dict = model_args['''vocab_size''']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    __UpperCamelCase :Tuple = model_args.pop('''n_head''' )
    __UpperCamelCase :Optional[Any] = model_args.pop('''n_embd''' )
    __UpperCamelCase :List[Any] = model_args.pop('''n_layer''' )
    __UpperCamelCase :Union[str, Any] = ConfigClass(**checkpoint['''model_args'''] )
    __UpperCamelCase :str = ModelClass(config=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = GenerationConfigClass()
    __UpperCamelCase :Tuple = model_generation_config
    __UpperCamelCase :str = checkpoint['''model''']
    # fixup checkpoint: strip the torch.compile prefix, then remap layer names
    __UpperCamelCase :List[Any] = '''_orig_mod.'''
    for k, v in list(state_dict.items() ):
        if k.startswith(SCREAMING_SNAKE_CASE ):
            # replace part of the key with corresponding layer name in HF implementation
            __UpperCamelCase :Optional[Any] = k[len(SCREAMING_SNAKE_CASE ) :]
            for old_layer_name in new_layer_name_dict:
                __UpperCamelCase :Union[str, Any] = new_k.replace(SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
            __UpperCamelCase :List[str] = state_dict.pop(SCREAMING_SNAKE_CASE )
    # Sanity-check key coverage, ignoring the non-persistent attention bias.
    __UpperCamelCase :Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
    __UpperCamelCase :int = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
    __UpperCamelCase :int = set(model.state_dict().keys() ) - set(state_dict.keys() )
    __UpperCamelCase :int = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
    if len(SCREAMING_SNAKE_CASE ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(SCREAMING_SNAKE_CASE ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Tuple = checkpoint['''best_val_loss'''].item()
    logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE , 3 )} loss""" )
    model.eval()
    model.to(SCREAMING_SNAKE_CASE )
    del checkpoint, state_dict
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ):
    '''Convert one bark sub-model to HF format and save it.

    Loads both the HF-converted model and the original bark model on CPU,
    verifies they have the same parameter count and produce numerically
    matching outputs on random integer inputs (within 1e-3 absolute max
    difference), then saves the HF model with ``save_pretrained``.
    '''
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    __UpperCamelCase :List[Any] = '''cpu'''  # do conversion on cpu
    __UpperCamelCase :List[Any] = _get_ckpt_path(SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :int = _load_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    # load bark initial model
    __UpperCamelCase :Optional[Any] = _bark_load_model(SCREAMING_SNAKE_CASE , '''cpu''' , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    if model_type == "text":
        __UpperCamelCase :Dict = bark_model['''model''']
    if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
        raise ValueError('''initial and new models don\'t have the same number of parameters''' )
    # check if same output as the bark model
    __UpperCamelCase :List[str] = 5
    __UpperCamelCase :List[str] = 10
    if model_type in ["text", "coarse"]:
        __UpperCamelCase :Dict = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        __UpperCamelCase :str = bark_model(SCREAMING_SNAKE_CASE )[0]
        __UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE )
        # take last logits
        __UpperCamelCase :str = output_new_model_total.logits[:, [-1], :]
    else:
        __UpperCamelCase :Any = 3
        __UpperCamelCase :List[Any] = 8
        __UpperCamelCase :Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        __UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __UpperCamelCase :str = bark_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __UpperCamelCase :str = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('''initial and new outputs don\'t have the same shape''' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('''initial and new outputs are not equal''' )
    Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
    '''Assemble the full HF BarkModel from the three converted sub-models.

    Loads the semantic, coarse-acoustic and fine-acoustic sub-models plus the
    facebook/encodec_24khz codec, composes their configs and generation
    configs into a single ``BarkConfig``/``BarkGenerationConfig``, wires the
    sub-models into a ``BarkModel``, and saves (optionally pushing to the
    Hub).
    '''
    __UpperCamelCase :List[str] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
    __UpperCamelCase :Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
    __UpperCamelCase :Tuple = BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
    __UpperCamelCase :List[Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
    __UpperCamelCase :Union[str, Any] = BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Dict = BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
    __UpperCamelCase :Tuple = BarkConfig.from_sub_model_configs(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    __UpperCamelCase :int = BarkModel(SCREAMING_SNAKE_CASE )
    # Attach the loaded sub-models and codec onto the composite model.
    __UpperCamelCase :List[str] = semantic
    __UpperCamelCase :Any = coarseAcoustic
    __UpperCamelCase :Tuple = fineAcoustic
    __UpperCamelCase :List[Any] = codec
    __UpperCamelCase :int = bark_generation_config
    Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
    bark.save_pretrained(SCREAMING_SNAKE_CASE , repo_id=SCREAMING_SNAKE_CASE , push_to_hub=SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # CLI: convert one bark sub-model checkpoint to HF format.
    # NOTE(review): the parser is bound to ``__lowercase`` by obfuscation but
    # referenced below as ``parser``/``args`` -- the original names.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    __lowercase = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 43
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger plus the tokenizer-file map for the BLOOM checkpoints.
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
# Hub URLs of the fast-tokenizer JSON for each published BLOOM size.
__lowercase = {
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Fast (tokenizers-backed) BLOOM tokenizer.

    Wraps a ``tokenizer.json`` file and adds handling for the
    ``add_prefix_space`` pre-tokenizer option and conversational encoding.
    '''
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : List[str] = ["""input_ids""", """attention_mask"""]
    # No slow tokenizer counterpart.
    a__ : int = None
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
        '''Build the tokenizer and sync the backend pre-tokenizer's
        ``add_prefix_space`` setting with the requested value.'''
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        # Rebuild the backend pre-tokenizer if its add_prefix_space flag
        # disagrees with the constructor argument.
        __UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
            __UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
            __UpperCamelCase :str = add_prefix_space
            __UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
        __UpperCamelCase :Tuple = add_prefix_space
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        '''Batch-encode; pretokenized input requires add_prefix_space=True.'''
        __UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        '''Encode a single input; same add_prefix_space constraint as above.'''
        __UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        '''Save the backend tokenizer model files; returns the written paths.'''
        __UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
        return tuple(__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> List[int]:
        '''Encode a Conversation: each turn followed by EOS, truncated on the
        left to model_max_length.'''
        __UpperCamelCase :str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
        if len(__lowercase) > self.model_max_length:
            __UpperCamelCase :Any = input_ids[-self.model_max_length :]
        return input_ids
| 43
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger, SentencePiece vocab-file map, and per-checkpoint max input
# sizes for the ALBERT family.
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''spiece.model'''}
__lowercase = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    }
}
__lowercase = {
    '''albert-base-v1''': 512,
    '''albert-large-v1''': 512,
    '''albert-xlarge-v1''': 512,
    '''albert-xxlarge-v1''': 512,
    '''albert-base-v2''': 512,
    '''albert-large-v2''': 512,
    '''albert-xlarge-v2''': 512,
    '''albert-xxlarge-v2''': 512,
}
# SentencePiece word-boundary marker.
__lowercase = '''▁'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''SentencePiece-based (slow) ALBERT tokenizer.

    Handles lowercasing, whitespace/accent normalization, digit-comma
    splitting, and the [CLS]/[SEP] special-token layout used by ALBERT.
    '''
    a__ : Tuple = VOCAB_FILES_NAMES
    a__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , __lowercase , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase="[CLS]" , __lowercase="[SEP]" , __lowercase="<unk>" , __lowercase="[SEP]" , __lowercase="<pad>" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase = None , **__lowercase , ) -> None:
        '''Load the SentencePiece model and record normalization flags.'''
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __UpperCamelCase :Union[str, Any] = (
            AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase)
            if isinstance(__lowercase , __lowercase)
            else mask_token
        )
        __UpperCamelCase :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
        __UpperCamelCase :str = do_lower_case
        __UpperCamelCase :str = remove_space
        __UpperCamelCase :Union[str, Any] = keep_accents
        __UpperCamelCase :Dict = vocab_file
        __UpperCamelCase :str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(__lowercase)
    @property
    def UpperCamelCase__ ( self) -> Any:
        '''Vocabulary size, as reported by the SentencePiece model.'''
        return len(self.sp_model)
    def UpperCamelCase__ ( self) -> List[Any]:
        '''Return the token->id map, including added tokens.'''
        __UpperCamelCase :Optional[int] = {self.convert_ids_to_tokens(__lowercase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self) -> Union[str, Any]:
        '''Drop the unpicklable SentencePiece processor before pickling.'''
        __UpperCamelCase :List[str] = self.__dict__.copy()
        __UpperCamelCase :Union[str, Any] = None
        return state
    def __setstate__( self , __lowercase) -> Any:
        '''Restore state and reload the SentencePiece model from vocab_file.'''
        __UpperCamelCase :int = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            __UpperCamelCase :Dict = {}
        __UpperCamelCase :Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def UpperCamelCase__ ( self , __lowercase) -> List[str]:
        '''Normalize raw text: collapse whitespace, unify quotes, optionally
        strip accents (NFKD) and lowercase.'''
        if self.remove_space:
            __UpperCamelCase :List[Any] = ''' '''.join(inputs.strip().split())
        else:
            __UpperCamelCase :List[Any] = inputs
        __UpperCamelCase :str = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            __UpperCamelCase :Optional[Any] = unicodedata.normalize('''NFKD''' , __lowercase)
            __UpperCamelCase :List[str] = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase)])
        if self.do_lower_case:
            __UpperCamelCase :List[Any] = outputs.lower()
        return outputs
    def UpperCamelCase__ ( self , __lowercase) -> List[str]:
        '''Tokenize with SentencePiece; pieces like "9," are re-split so the
        digit and trailing comma become separate pieces.'''
        __UpperCamelCase :Optional[Any] = self.preprocess_text(__lowercase)
        __UpperCamelCase :List[str] = self.sp_model.encode(__lowercase , out_type=__lowercase)
        __UpperCamelCase :Any = []
        for piece in pieces:
            if len(__lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                __UpperCamelCase :Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        __UpperCamelCase :int = cur_pieces[1:]
                    else:
                        __UpperCamelCase :List[Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(__lowercase)
            else:
                new_pieces.append(__lowercase)
        return new_pieces
    def UpperCamelCase__ ( self , __lowercase) -> str:
        '''Convert a token (piece) to its vocabulary id.'''
        return self.sp_model.PieceToId(__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> int:
        '''Convert a vocabulary id back to its token (piece).'''
        return self.sp_model.IdToPiece(__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
        '''Detokenize, decoding runs of ordinary pieces with SentencePiece and
        splicing special tokens through verbatim.'''
        __UpperCamelCase :int = []
        __UpperCamelCase :List[Any] = ''''''
        __UpperCamelCase :int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__lowercase) + token
                __UpperCamelCase :List[str] = True
                __UpperCamelCase :Optional[Any] = []
            else:
                current_sub_tokens.append(__lowercase)
                __UpperCamelCase :Tuple = False
        out_string += self.sp_model.decode(__lowercase)
        return out_string.strip()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
        '''Build model inputs with ALBERT's special-token layout:
        [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair.'''
        __UpperCamelCase :str = [self.sep_token_id]
        __UpperCamelCase :Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
if token_ids_a is not None:
return [1] + ([0] * len(__lowercase)) + [1] + ([0] * len(__lowercase)) + [1]
return [1] + ([0] * len(__lowercase)) + [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :List[Any] = [self.sep_token_id]
__UpperCamelCase :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
if not os.path.isdir(__lowercase):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
__UpperCamelCase :List[Any] = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __lowercase)
elif not os.path.isfile(self.vocab_file):
with open(__lowercase , '''wb''') as fi:
__UpperCamelCase :Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowercase)
return (out_vocab_file,)
| 43
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration class for the CTRL model.

    NOTE(review): the three class attributes were all assigned to one
    obfuscated name `a__`, clobbering each other; restored to the attribute
    names the `PretrainedConfig` machinery reads. The `__init__` parameters
    were all named `__lowercase` (a SyntaxError) — names restored from the
    body's assignments and the released CTRL defaults.
    """
    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        """Store CTRL hyperparameters; defaults match the released checkpoint."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 43
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
# Fix: the import structure and its optional entries were assigned to a
# throwaway obfuscated name, so `_import_structure` below was undefined and
# the optional members clobbered each other instead of being registered.
# NOTE(review): submodule keys use the "_v2" spelling from the existing dict;
# the TYPE_CHECKING imports below use "_va" module names — confirm which
# filenames actually exist in this package.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_va import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetVaConfig,
        MobileNetVaOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
        from .image_processing_mobilenet_va import MobileNetVaImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_va import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetVaForImageClassification,
            MobileNetVaForSemanticSegmentation,
            MobileNetVaModel,
            MobileNetVaPreTrainedModel,
            load_tf_weights_in_mobilenet_va,
        )
else:
    import sys

    # Fix: replace this module with the lazy proxy (the original bound the
    # proxy to a throwaway name, so lazy loading never took effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast (CPU-sized) tests for `TextToVideoSDPipeline` built from tiny components.

    NOTE(review): local assignments below target obfuscated names
    (`__UpperCamelCase`), so later references (`unet`, `sd_pipe`, ...) are
    undefined as written — the intended locals are inferred from upstream.
    """
    a__ : str = TextToVideoSDPipeline
    a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
    a__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    a__ : int = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Build the dict of tiny pipeline components (3D UNet, DDIM scheduler, VAE, CLIP)."""
        torch.manual_seed(0)
        __UpperCamelCase :str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        __UpperCamelCase :Optional[int] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        __UpperCamelCase :Optional[Any] = CLIPTextModel(__lowercase)
        __UpperCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        __UpperCamelCase :Union[str, Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[int]:
        """Return deterministic call kwargs; device-seeded generator except on MPS."""
        if str(__lowercase).startswith('''mps'''):
            __UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :Dict = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Two-step generation on CPU must reproduce a pinned 3x3 frame slice."""
        __UpperCamelCase :int = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Optional[int] = self.get_dummy_components()
        __UpperCamelCase :Dict = TextToVideoSDPipeline(**__lowercase)
        __UpperCamelCase :Any = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowercase)
        __UpperCamelCase :int = '''np'''
        __UpperCamelCase :List[str] = sd_pipe(**__lowercase).frames
        __UpperCamelCase :Optional[Any] = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        __UpperCamelCase :str = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def UpperCamelCase__ ( self) -> Tuple:
        """Attention slicing must not change outputs beyond 3e-3."""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3E-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Optional[int]:
        """xformers attention must match the default implementation within 1e-2."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1E-2)
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Dict:
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
    def UpperCamelCase__ ( self) -> str:
        pass
    def UpperCamelCase__ ( self) -> List[str]:
        """Re-enable the mixin's progress-bar test."""
        return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow CUDA integration tests for `TextToVideoSDPipeline`.

    NOTE(review): assignment targets are obfuscated, so later references
    (`pipe`, `video_frames`, `expected_video`, `video`) are undefined as
    written — intended locals inferred from upstream.
    """
    def UpperCamelCase__ ( self) -> Dict:
        """25-step DPMSolver generation must match the reference video within 5e-2."""
        __UpperCamelCase :Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
        __UpperCamelCase :List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        __UpperCamelCase :Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        __UpperCamelCase :str = pipe.to('''cuda''')
        __UpperCamelCase :Optional[Any] = '''Spiderman is surfing'''
        __UpperCamelCase :Union[str, Any] = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=25 , output_type='''pt''').frames
        __UpperCamelCase :Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
    def UpperCamelCase__ ( self) -> int:
        """Two-step generation smoke test against a pinned reference video."""
        __UpperCamelCase :str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
        __UpperCamelCase :Union[str, Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        __UpperCamelCase :str = pipe.to('''cuda''')
        __UpperCamelCase :Union[str, Any] = '''Spiderman is surfing'''
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''pt''').frames
        __UpperCamelCase :Optional[Any] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
| 43
| 1
|
from __future__ import annotations
import math
__lowercase = '''2020.9.26'''
__lowercase = '''xcodz-dot, cclaus, dhruvmanila'''
def convert_to_ad(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto 2D with a simple perspective projection.

    Fix: all five parameters were named SCREAMING_SNAKE_CASE (a SyntaxError),
    and `__main__` below calls this function as `convert_to_ad`, which did not
    exist. Raises TypeError if any argument is not an int or float.
    """
    # locals() here contains exactly the five parameters.
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"""Input values must either be float or int: {list(locals().values())}"""
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


# Backward-compatible alias for the obfuscated name.
lowerCamelCase = convert_to_ad
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate point (x, y, z) about `axis` ('x', 'y' or 'z') by `angle`.

    Fix: all parameters were named SCREAMING_SNAKE_CASE (a SyntaxError) and
    `__main__` below calls this as `rotate`, which did not exist.

    NOTE(review): the conversion `(angle % 360) / 450 * 180 / pi` reproduces
    the original implementation and is deliberately left unchanged.
    """
    if not isinstance(axis, str):
        raise TypeError('''Axis must be a str''')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            '''Input values except axis must either be float or int: '''
            f"""{list(input_variables.values())}"""
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z


# Backward-compatible alias for the obfuscated name.
lowerCamelCase = rotate
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo calls; the f-string `=` specifier echoes the expression and result.
    print(F'{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }')
    print(F'{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }')
| 43
|
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array of `input_str` in O(n).

    z_result[i] is the length of the longest common prefix of `input_str` and
    `input_str[i:]` (z_result[0] is left at 0 by convention).

    Fix: the body called `go_next`, which is defined later under an obfuscated
    name; the extension condition is inlined so the function is
    self-contained, and the function is named `z_function` as the caller
    below expects.
    """
    z_result = [0 for _ in range(len(input_str))]
    # [left_pointer, right_pointer] is the rightmost known match interval.
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        if i <= right_pointer:
            # Reuse previously computed information inside the interval.
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        # Extend the match while characters agree (inlined `go_next`).
        while (
            i + z_result[i] < len(input_str)
            and input_str[z_result[i]] == input_str[i + z_result[i]]
        ):
            z_result[i] += 1
        # If this index extends the interval, update it.
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


# Backward-compatible alias for the obfuscated name.
lowerCamelCase = z_function
def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Return True while the prefix match at position i can grow by one char.

    Fix: parameters were all named SCREAMING_SNAKE_CASE (a SyntaxError) and
    the function was defined under an obfuscated name although the Z-function
    above calls it as `go_next`.
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


# Backward-compatible alias for the obfuscated name.
lowerCamelCase = go_next
def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` via the Z-algorithm.

    Fix: both parameters were named SCREAMING_SNAKE_CASE (a SyntaxError) and
    the body calls `z_function`, which the file only defines under an
    obfuscated name.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # A Z-value of at least len(pattern) marks a position where the
        # pattern matches.
        if val >= len(pattern):
            answer += 1
    return answer


# Backward-compatible alias for the obfuscated name.
lowerCamelCase = find_pattern
if __name__ == "__main__":
    import doctest
    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 43
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    """Download-time checks for Flax diffusion pipelines."""

    def UpperCamelCase__ ( self) -> List[Any]:
        """Loading a Flax pipeline must not download any PyTorch `.bin` weights.

        Fix: the obfuscated body passed undefined `__lowercase` names as
        `safety_checker`/`cache_dir` and read undefined locals; restored the
        intended temp-dir wiring.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=None , cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname)[0] , '''snapshots'''))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow multi-device integration tests for `FlaxStableDiffusionPipeline`.

    NOTE(review): assignment targets are obfuscated (`__UpperCamelCase`), so
    later references (`pipeline`, `params`, `prompt`, `prng_seed`, ...) are
    undefined as written — the intended locals are inferred from upstream.
    """
    def UpperCamelCase__ ( self) -> Tuple:
        """Tiny pipeline, 4 steps: pinned slice/sum checks on an 8-device host."""
        __UpperCamelCase , __UpperCamelCase :Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase)
        __UpperCamelCase :Optional[int] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        __UpperCamelCase :Optional[Any] = jax.random.PRNGKey(0)
        __UpperCamelCase :str = 4
        __UpperCamelCase :Union[str, Any] = jax.device_count()
        __UpperCamelCase :Dict = num_samples * [prompt]
        __UpperCamelCase :Optional[int] = pipeline.prepare_inputs(__lowercase)
        # shard inputs and rng
        __UpperCamelCase :str = replicate(__lowercase)
        __UpperCamelCase :Optional[Any] = jax.random.split(__lowercase , __lowercase)
        __UpperCamelCase :Optional[Any] = shard(__lowercase)
        __UpperCamelCase :Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
            assert np.abs(np.abs(__lowercase , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
        __UpperCamelCase :Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(__lowercase) == num_samples
    def UpperCamelCase__ ( self) -> int:
        """Full SD v1-4 ('flax' revision), 50 steps: pinned numeric checks."""
        __UpperCamelCase , __UpperCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowercase)
        __UpperCamelCase :List[str] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        __UpperCamelCase :List[Any] = jax.random.PRNGKey(0)
        __UpperCamelCase :str = 50
        __UpperCamelCase :Union[str, Any] = jax.device_count()
        __UpperCamelCase :Any = num_samples * [prompt]
        __UpperCamelCase :Dict = pipeline.prepare_inputs(__lowercase)
        # shard inputs and rng
        __UpperCamelCase :List[str] = replicate(__lowercase)
        __UpperCamelCase :List[Any] = jax.random.split(__lowercase , __lowercase)
        __UpperCamelCase :str = shard(__lowercase)
        __UpperCamelCase :List[str] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
            assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
    def UpperCamelCase__ ( self) -> Tuple:
        """SD v1-4 in bfloat16 ('bf16' revision), 50 steps: pinned numeric checks."""
        __UpperCamelCase , __UpperCamelCase :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase)
        __UpperCamelCase :Optional[int] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        __UpperCamelCase :str = jax.random.PRNGKey(0)
        __UpperCamelCase :Dict = 50
        __UpperCamelCase :Optional[Any] = jax.device_count()
        __UpperCamelCase :Optional[int] = num_samples * [prompt]
        __UpperCamelCase :Optional[int] = pipeline.prepare_inputs(__lowercase)
        # shard inputs and rng
        __UpperCamelCase :List[str] = replicate(__lowercase)
        __UpperCamelCase :Optional[Any] = jax.random.split(__lowercase , __lowercase)
        __UpperCamelCase :Optional[int] = shard(__lowercase)
        __UpperCamelCase :Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
            assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
    def UpperCamelCase__ ( self) -> List[str]:
        """Same bf16 run but with the default safety checker enabled."""
        __UpperCamelCase , __UpperCamelCase :Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa)
        __UpperCamelCase :List[Any] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        __UpperCamelCase :List[Any] = jax.random.PRNGKey(0)
        __UpperCamelCase :List[Any] = 50
        __UpperCamelCase :Any = jax.device_count()
        __UpperCamelCase :List[str] = num_samples * [prompt]
        __UpperCamelCase :Tuple = pipeline.prepare_inputs(__lowercase)
        # shard inputs and rng
        __UpperCamelCase :Any = replicate(__lowercase)
        __UpperCamelCase :Optional[int] = jax.random.split(__lowercase , __lowercase)
        __UpperCamelCase :Dict = shard(__lowercase)
        __UpperCamelCase :Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
            assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
    def UpperCamelCase__ ( self) -> Dict:
        """bf16 run with an explicit Flax DDIM scheduler instead of the default."""
        __UpperCamelCase :Tuple = FlaxDDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
        __UpperCamelCase , __UpperCamelCase :Any = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__lowercase , safety_checker=__lowercase , )
        __UpperCamelCase :str = scheduler.create_state()
        __UpperCamelCase :Any = scheduler_state
        __UpperCamelCase :Any = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        __UpperCamelCase :Union[str, Any] = jax.random.PRNGKey(0)
        __UpperCamelCase :Any = 50
        __UpperCamelCase :str = jax.device_count()
        __UpperCamelCase :Optional[int] = num_samples * [prompt]
        __UpperCamelCase :List[str] = pipeline.prepare_inputs(__lowercase)
        # shard inputs and rng
        __UpperCamelCase :Optional[int] = replicate(__lowercase)
        __UpperCamelCase :List[Any] = jax.random.split(__lowercase , __lowercase)
        __UpperCamelCase :int = shard(__lowercase)
        __UpperCamelCase :List[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
            assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
    def UpperCamelCase__ ( self) -> str:
        """Memory-efficient attention should reproduce the default attention's output slice."""
        __UpperCamelCase :Dict = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        __UpperCamelCase :Optional[int] = jax.device_count()
        __UpperCamelCase :int = num_samples * [prompt]
        __UpperCamelCase :Optional[Any] = jax.random.split(jax.random.PRNGKey(0) , __lowercase)
        __UpperCamelCase , __UpperCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase , )
        __UpperCamelCase :List[str] = replicate(__lowercase)
        __UpperCamelCase :Optional[int] = pipeline.prepare_inputs(__lowercase)
        __UpperCamelCase :List[Any] = shard(__lowercase)
        __UpperCamelCase :str = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        __UpperCamelCase :int = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        __UpperCamelCase , __UpperCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase , use_memory_efficient_attention=__lowercase , )
        __UpperCamelCase :List[str] = replicate(__lowercase)
        __UpperCamelCase :Dict = pipeline.prepare_inputs(__lowercase)
        __UpperCamelCase :Union[str, Any] = shard(__lowercase)
        __UpperCamelCase :str = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        __UpperCamelCase :Union[str, Any] = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1E-2
| 43
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = 256
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Diffusion pipeline generating audio from note tokens: two encoders
    condition a film decoder that denoises mel spectrograms, which MelGAN
    then converts to a waveform.

    NOTE(review): many assignment targets are obfuscated (`__UpperCamelCase`),
    so later references (`features`, `outputs`, `timesteps`, ...) are
    undefined as written — intended locals inferred from upstream.
    """
    a__ : Tuple = ["""melgan"""]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> None:
        """Register components.

        NOTE(review): the five parameters share one obfuscated name (a
        SyntaxError as written); the order appears to be notes_encoder,
        continuous_encoder, decoder, scheduler, melgan — confirm upstream.
        """
        super().__init__()
        # From MELGAN
        __UpperCamelCase :int = math.log(1E-5) # Matches MelGAN training.
        __UpperCamelCase :int = 4.0 # Largest value for most examples
        __UpperCamelCase :str = 128
        self.register_modules(
            notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )
    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Dict:
        """Linearly map features from [min_value, max_value] to `output_range`."""
        __UpperCamelCase , __UpperCamelCase :str = output_range
        if clip:
            __UpperCamelCase :Union[str, Any] = torch.clip(__lowercase , self.min_value , self.max_value)
        # Scale to [0, 1].
        __UpperCamelCase :Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Optional[int]:
        """Inverse of the scaling above: map from `input_range` back to feature range."""
        __UpperCamelCase , __UpperCamelCase :int = input_range
        __UpperCamelCase :Optional[int] = torch.clip(__lowercase , __lowercase , __lowercase) if clip else outputs
        # Scale to [0, 1].
        __UpperCamelCase :List[str] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
        """Encode note tokens and continuous context; returns (encoding, mask) pairs."""
        __UpperCamelCase :List[str] = input_tokens > 0
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.notes_encoder(
            encoder_input_tokens=__lowercase , encoder_inputs_mask=__lowercase)
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.continuous_encoder(
            encoder_inputs=__lowercase , encoder_inputs_mask=__lowercase)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
        """Run one decoder denoising step at `noise_time`; returns the logits."""
        __UpperCamelCase :Optional[int] = noise_time
        if not torch.is_tensor(__lowercase):
            __UpperCamelCase :str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(__lowercase) and len(timesteps.shape) == 0:
            __UpperCamelCase :Dict = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        __UpperCamelCase :List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        __UpperCamelCase :Tuple = self.decoder(
            encodings_and_masks=__lowercase , decoder_input_tokens=__lowercase , decoder_noise_time=__lowercase)
        return logits
    @torch.no_grad()
    def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio chunk-by-chunk, feeding each chunk's prediction as
        the next chunk's context, then optionally decode with MelGAN."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__lowercase)}.""")
        __UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
        __UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
        __UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
        for i, encoder_input_tokens in enumerate(__lowercase):
            if i == 0:
                __UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                __UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                __UpperCamelCase :Tuple = ones
            __UpperCamelCase :Optional[Any] = self.scale_features(
                __lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
            __UpperCamelCase :int = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            __UpperCamelCase :int = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(__lowercase)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                __UpperCamelCase :Optional[int] = self.decode(
                    encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                __UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
            __UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
            __UpperCamelCase :List[Any] = mel[:1]
            __UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
            __UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowercase , __lowercase)
            logger.info('''Generated segment''' , __lowercase)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            __UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            __UpperCamelCase :List[str] = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=__lowercase)
| 43
| 1
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''')}) , supervised_keys=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()})]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowercase)
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}) , supervised_keys=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()})
]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowercase)
def lowerCamelCase ( ):
    """Return the flat dummy examples as ``(id, {"content": str})`` pairs."""
    samples = ['''foo''', '''bar''', '''foobar''']
    paired = []
    for idx, text in enumerate(samples):
        paired.append((idx, {"content": text}))
    return paired
def lowerCamelCase ( ):
    """Return the nested dummy examples as ``(id, {"a": {"b": [str]}})`` pairs."""
    samples = ['''foo''', '''bar''', '''foobar''']
    paired = []
    for idx, text in enumerate(samples):
        paired.append((idx, {"a": {"b": [text]}}))
    return paired
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Integration tests for Beam-based dataset builders run with the DirectRunner.

    BUGFIX: locals (``builder``, ``dset``, ``expected_num_examples``,
    ``tmp_cache_dir``) were previously assigned to throwaway obfuscated names
    and then read under their real names, raising NameError; the sharded test
    also checked shard 00000 twice instead of checking shard 00001.

    NOTE(review): all four test methods share the name ``UpperCamelCase__`` so
    only the last definition survives on the class; restoring distinct
    ``test_*`` names would change the public interface — left to a follow-up.
    """

    @require_beam
    def UpperCamelCase__ ( self) -> List[Any]:
        # End-to-end: prepare writes one train arrow file plus dataset_info.json,
        # and as_dataset round-trips every example.
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""")))
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples)
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
            del dset

    @require_beam
    def UpperCamelCase__ ( self) -> Any:
        # Sharded write: forcing WriteToParquet to num_shards=2 must produce
        # two shard files (00000 and 00001) that together hold all examples.
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''')
            with patch('''apache_beam.io.parquetio.WriteToParquet''') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        # BUGFIX: previously re-checked shard 00000; must check the second shard.
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""")))
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content''']) , sorted(['''foo''', '''bar''', '''foobar''']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
            del dset

    @require_beam
    def UpperCamelCase__ ( self) -> List[Any]:
        # Without a beam_runner, download_and_prepare must refuse to run.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare)

    @require_beam
    def UpperCamelCase__ ( self) -> Optional[int]:
        # Same end-to-end check as the flat case, but with nested features.
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""")))
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples)
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples)
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
            del dset
| 43
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
# fairseq -> HF Hubert parameter-name mapping used by the weight loader below;
# "*" is replaced at load time by the transformer layer index.
# NOTE(review): the constant is bound to the obfuscated name ``__lowercase``
# while later code reads ``MAPPING`` — confirm the intended name.
__lowercase = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
def lowerCamelCase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by dotted ``key``.

    BUGFIX: the previous version repeated one parameter name five times (a
    SyntaxError), discarded the result of the getattr walk, and assigned each
    branch to a throwaway local — so the checkpoint tensor was never written
    into the HF module.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path (e.g. ``encoder.layers.3.attention.k_proj``).
        value: checkpoint tensor to copy.
        full_name: original fairseq parameter name (for logging/errors).
        weight_type: one of ``weight``/``weight_g``/``weight_v``/``bias`` or None.
    """
    # Walk the dotted path down to the target submodule/parameter.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )

    # Shape sanity-check against the tensor we are about to copy.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    # Copy into the matching parameter slot of the resolved module.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Port every tensor of a fairseq Hubert state dict into the HF model.

    NOTE(review): the obfuscated signature repeats one parameter name (a
    SyntaxError in Python); the body reads ``fairseq_model``, ``hf_model`` and
    ``is_finetuned``, so that was presumably the original signature — confirm.
    The assignments below also target throwaway obfuscated locals while later
    lines read the real names (``fairseq_dict``, ``is_used``, ``mapped_key``,
    ``unused_weights``) — restoring them is left to a follow-up fix.
    """
    __UpperCamelCase :List[Any] = []  # intended: unused_weights — names matching nothing, reported at the end
    __UpperCamelCase :int = fairseq_model.state_dict()
    # Finetuned checkpoints nest the feature extractor under ``hubert.``.
    __UpperCamelCase :List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        __UpperCamelCase :List[Any] = False  # intended: is_used
        if "conv_layers" in name:
            # Conv feature-extractor tensors have their own layer-indexed loader.
            load_conv_layer(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
            __UpperCamelCase :List[str] = True
        else:
            for key, mapped_key in MAPPING.items():
                # Finetuned HF models prefix everything except the LM head with "hubert.".
                __UpperCamelCase :Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    __UpperCamelCase :Optional[Any] = True
                    if "*" in mapped_key:
                        # Recover the transformer layer index from the fairseq name.
                        __UpperCamelCase :List[str] = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
                        __UpperCamelCase :Optional[int] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
                    # Classify which parameter slot of the module the tensor fills.
                    if "weight_g" in name:
                        __UpperCamelCase :int = '''weight_g'''
                    elif "weight_v" in name:
                        __UpperCamelCase :List[Any] = '''weight_v'''
                    elif "weight" in name:
                        __UpperCamelCase :Dict = '''weight'''
                    elif "bias" in name:
                        __UpperCamelCase :Dict = '''bias'''
                    else:
                        __UpperCamelCase :Dict = None
                    set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                continue
        if not is_used:
            unused_weights.append(SCREAMING_SNAKE_CASE )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Load one fairseq conv-feature-extractor tensor into the HF extractor.

    NOTE(review): the signature repeats one parameter name (a SyntaxError); the
    body reads ``full_name``, ``value``, ``feature_extractor``,
    ``unused_weights`` and ``use_group_norm``, presumably the original order
    (full_name, value, feature_extractor, unused_weights, use_group_norm) —
    confirm. Assignments also target throwaway obfuscated locals while later
    lines read ``name``, ``items``, ``layer_id`` and ``type_id``.
    """
    __UpperCamelCase :Tuple = full_name.split('''conv_layers.''' )[-1]  # intended: name
    __UpperCamelCase :Optional[int] = name.split('''.''' )  # intended: items
    __UpperCamelCase :str = int(items[0] )  # intended: layer_id — which conv layer
    __UpperCamelCase :List[Any] = int(items[1] )  # intended: type_id — 0 = conv weight/bias, 2 = layer norm
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            __UpperCamelCase :Dict = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            __UpperCamelCase :Any = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    # Layer norms live at type_id 2; with group norm only layer 0 has one.
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            __UpperCamelCase :int = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            __UpperCamelCase :Union[str, Any] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        # Anything unrecognised is collected for the caller's warning.
        unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
    """Convert a fairseq Hubert checkpoint into a HF ``HubertForCTC``/``HubertModel``.

    NOTE(review): the signature repeats one parameter name (a SyntaxError); the
    body reads ``config_path``, ``dict_path``, ``is_finetuned``,
    ``checkpoint_path`` and writes via ``save_pretrained`` — presumably the
    original was (checkpoint_path, pytorch_dump_folder_path, config_path=None,
    dict_path=None, is_finetuned=True), matching the CLI call below — confirm.
    """
    # Build the target config, either from a provided JSON or from defaults.
    if config_path is not None:
        __UpperCamelCase :Tuple = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
    else:
        __UpperCamelCase :Optional[int] = HubertConfig()
    if is_finetuned:
        if dict_path:
            __UpperCamelCase :Optional[int] = Dictionary.load(SCREAMING_SNAKE_CASE )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __UpperCamelCase :Optional[int] = target_dict.pad_index
            __UpperCamelCase :Dict = target_dict.bos_index
            __UpperCamelCase :str = target_dict.eos_index
            __UpperCamelCase :Dict = len(target_dict.symbols )
            __UpperCamelCase :List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
            if not os.path.isdir(SCREAMING_SNAKE_CASE ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
                return
            os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
            # Dump the fairseq dictionary as a HF-style vocab.json.
            with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
            __UpperCamelCase :Optional[int] = WavaVecaCTCTokenizer(
                SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
            # Layer-norm extractors expect attention masks / zero-mean inputs.
            __UpperCamelCase :Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False
            __UpperCamelCase :Any = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
            __UpperCamelCase :Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
            processor.save_pretrained(SCREAMING_SNAKE_CASE )
        __UpperCamelCase :List[str] = HubertForCTC(SCREAMING_SNAKE_CASE )
    else:
        __UpperCamelCase :str = HubertModel(SCREAMING_SNAKE_CASE )
    # Load the fairseq model (finetuned checkpoints need the dict directory).
    if is_finetuned:
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    __UpperCamelCase :Dict = model[0].eval()
    # Copy all fairseq tensors into the HF model and serialize it.
    recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/dict paths and run the conversion.
    __lowercase = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    # NOTE(review): the parser is bound to the obfuscated name ``__lowercase``
    # but used as ``parser``/``args`` — confirm the intended names.
    __lowercase = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 43
| 1
|
from random import randint, random
def lowerCamelCase ( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ):
    """Build the initial highway for the Nagel–Schreckenberg simulation.

    BUGFIX: cars are now actually written into the highway
    (``highway[0][i] = ...``); previously the placement was assigned to a
    throwaway local (leaving every cell at -1) and the loop index was
    conflated with the clamped initial speed. The duplicated parameter names
    (a SyntaxError) are also restored.

    Args:
        number_of_cells: length of the circular highway.
        frequency: fixed gap between consecutive cars (when not random).
        initial_speed: starting speed for every car (clamped to >= 0).
        random_frequency: place cars at random gaps in [1, 2*max_speed].
        random_speed: give each car a random speed in [0, max_speed].
        max_speed: speed cap used for the random choices.

    Returns:
        A one-row highway: ``[-1]`` marks an empty cell, other values a car's speed.
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )  # a car cannot start with negative speed
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Any = 0
__UpperCamelCase :Optional[int] = highway_now[car_index + 1 :]
for cell in range(len(SCREAMING_SNAKE_CASE ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(SCREAMING_SNAKE_CASE , -1 )
def lowerCamelCase ( highway_now , probability , max_speed ):
    """Apply one Nagel–Schreckenberg speed-update step to ``highway_now``.

    BUGFIX: the three update rules previously wrote their results to a
    throwaway local instead of ``next_highway[car_index]``, so the returned
    highway was always empty and the rules never composed. The duplicated
    parameter names (a SyntaxError) are restored.

    Args:
        highway_now: current row of the highway (-1 = empty cell).
        probability: chance that a driver randomly slows down.
        max_speed: maximum speed of any car.

    Returns:
        A new row with each car's updated speed at its *current* position.
    """
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Rule 1: accelerate by 1, capped at max_speed.
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Rule 2: don't drive into the car ahead (gap minus one cell).
            distance = get_distance(highway_now , car_index ) - 1
            next_highway[car_index] = min(next_highway[car_index] , distance )
            # Rule 3: random slowdown, never below 0.
            if random() < probability:
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def lowerCamelCase ( highway , number_of_update , probability , max_speed ):
    """Run ``number_of_update`` simulation steps, appending each new row to ``highway``.

    BUGFIX: the step results (``next_speeds_calculated``, ``real_next_speeds``,
    the wrapped position and the committed speed) were previously assigned to
    throwaway locals, so positions were never advanced and undefined names
    were read. The duplicated parameter names (a SyntaxError) are restored.

    Args:
        highway: list of rows; ``highway[0]`` is the initial state (mutated in place).
        number_of_update: how many time steps to simulate.
        probability: random-slowdown probability passed to ``update``.
        max_speed: maximum car speed.

    Returns:
        The same ``highway`` list with one appended row per time step.
    """
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        # First compute every car's new speed at its current position...
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 43
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters for the mosaic augmentation script.
# NOTE(review): every constant below is bound to the same obfuscated name
# ``__lowercase``, so only the last assignment survives; later code reads
# OUTPUT_SIZE, SCALE_RANGE, FILTER_TINY_SCALE, LABEL_DIR, IMG_DIR,
# OUTPUT_DIR and NUMBER_IMAGES — confirm the intended names.
__lowercase = (720, 1280)  # Height, Width
__lowercase = (0.4, 0.6)  # if height or width lower than this scale, drop it.
__lowercase = 1 / 100  # minimum relative box size kept after the mosaic
__lowercase = ''''''  # directory containing the YOLO .txt label files
__lowercase = ''''''  # directory containing the source images
__lowercase = ''''''  # where mosaic images and labels are written
__lowercase = 250  # how many mosaic images to generate
def lowerCamelCase ( ):
    """Generate NUMBER_IMAGES mosaic images plus YOLO-format label files.

    NOTE(review): several reads below (``all_img_list``/``all_annos`` via
    ``get_dataset``, ``idxs``, ``new_image``/``new_annos``/``path``) refer to
    names whose assignments were collapsed onto ``__UpperCamelCase`` by
    obfuscation, and ``get_dataset`` is called with undefined
    ``SCREAMING_SNAKE_CASE`` arguments (presumably LABEL_DIR, IMG_DIR) —
    confirm against the original script.
    """
    __UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    for index in range(SCREAMING_SNAKE_CASE ):
        # Pick 4 random source images for the four mosaic quadrants.
        __UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __UpperCamelCase :List[Any] = random_chars(32 )
        __UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        # Write the mosaic image as JPEG (quality 85).
        cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
        __UpperCamelCase :Optional[Any] = []
        # Convert corner-format boxes back to YOLO center/size format.
        for anno in new_annos:
            __UpperCamelCase :int = anno[3] - anno[1]
            __UpperCamelCase :Optional[int] = anno[4] - anno[2]
            __UpperCamelCase :int = anno[1] + width / 2
            __UpperCamelCase :List[str] = anno[2] + height / 2
            __UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(SCREAMING_SNAKE_CASE )
        # One label line per kept bounding box.
        with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Read all YOLO label files and pair them with their image paths.

    NOTE(review): the signature repeats one parameter name (a SyntaxError);
    presumably (label_dir, img_dir). The two list initialisations below were
    both collapsed onto the same obfuscated local, while later lines read
    ``img_paths`` and ``labels`` — confirm.

    Returns:
        (img_paths, labels) where each label entry is a list of
        [class_id, xmin, ymin, xmax, ymax] boxes in relative coordinates.
    """
    __UpperCamelCase :str = []  # intended: img_paths
    __UpperCamelCase :str = []  # intended: labels
    for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
        __UpperCamelCase :Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(SCREAMING_SNAKE_CASE ) as in_file:
            __UpperCamelCase :str = in_file.readlines()
        __UpperCamelCase :Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
        __UpperCamelCase :int = []
        for obj_list in obj_lists:
            __UpperCamelCase :Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
            # YOLO stores (center_x, center_y, w, h); convert to corner format.
            __UpperCamelCase :Any = float(obj[1] ) - float(obj[3] ) / 2
            __UpperCamelCase :List[str] = float(obj[2] ) - float(obj[4] ) / 2
            __UpperCamelCase :Dict = float(obj[1] ) + float(obj[3] ) / 2
            __UpperCamelCase :List[str] = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        # Skip images without any annotated box.
        if not boxes:
            continue
        img_paths.append(SCREAMING_SNAKE_CASE )
        labels.append(SCREAMING_SNAKE_CASE )
    return img_paths, labels
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , ):
    """Compose four images into one mosaic and remap their bounding boxes.

    NOTE(review): the signature repeats one parameter name (a SyntaxError);
    presumably (all_img_list, all_annos, idxs, output_size, scale_range,
    filter_scale=0.0). Assignments below target throwaway obfuscated locals
    while later lines read ``output_img``, ``scale_x``/``scale_y``,
    ``divid_point_x``/``divid_point_y``, ``new_anno``, ``path_list``,
    ``img_annos`` and ``img`` — confirm against the original script.

    Returns:
        (output_img, new_anno, path_list[0]) — mosaic image, remapped boxes
        in corner format, and the first source image's path.
    """
    # Blank canvas for the mosaic.
    __UpperCamelCase :List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
    # Random split point (relative) dividing the canvas into four quadrants.
    __UpperCamelCase :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    __UpperCamelCase :int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    __UpperCamelCase :Optional[int] = int(scale_x * output_size[1] )
    __UpperCamelCase :Any = int(scale_y * output_size[0] )
    __UpperCamelCase :List[str] = []
    __UpperCamelCase :Dict = []
    for i, index in enumerate(SCREAMING_SNAKE_CASE ):
        __UpperCamelCase :Any = all_img_list[index]
        path_list.append(SCREAMING_SNAKE_CASE )
        __UpperCamelCase :Any = all_annos[index]
        __UpperCamelCase :Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
        if i == 0:  # top-left
            __UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
            __UpperCamelCase :Union[str, Any] = img
            # Boxes shrink with the quadrant: scale relative coordinates down.
            for bbox in img_annos:
                __UpperCamelCase :Union[str, Any] = bbox[1] * scale_x
                __UpperCamelCase :Optional[Any] = bbox[2] * scale_y
                __UpperCamelCase :int = bbox[3] * scale_x
                __UpperCamelCase :Union[str, Any] = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            __UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
            __UpperCamelCase :List[str] = img
            # x is offset by the split point, y scales down.
            for bbox in img_annos:
                __UpperCamelCase :str = scale_x + bbox[1] * (1 - scale_x)
                __UpperCamelCase :Dict = bbox[2] * scale_y
                __UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
                __UpperCamelCase :List[Any] = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            __UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
            __UpperCamelCase :Optional[int] = img
            # y is offset by the split point, x scales down.
            for bbox in img_annos:
                __UpperCamelCase :Tuple = bbox[1] * scale_x
                __UpperCamelCase :Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
                __UpperCamelCase :Tuple = bbox[3] * scale_x
                __UpperCamelCase :Dict = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            __UpperCamelCase :Optional[int] = cva.resize(
                SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            __UpperCamelCase :Optional[int] = img
            # Both axes offset by the split point.
            for bbox in img_annos:
                __UpperCamelCase :Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
                __UpperCamelCase :Optional[int] = scale_y + bbox[2] * (1 - scale_y)
                __UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
                __UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        __UpperCamelCase :List[Any] = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def lowerCamelCase ( number_char ):
    """Return a random string of ``number_char`` lowercase letters and digits.

    BUGFIX: the parameter was previously bound to a different name than the
    one the body reads (``number_char``), so calling the function raised
    NameError before it could validate anything.

    Args:
        number_char: desired length of the random code; must be > 1.
    """
    assert number_char > 1, "The number of character should greater than 1"
    # Alphabet: a-z plus 0-9, sampled independently per position.
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
    # Generate the mosaic dataset, then report completion.
    main()
    print('''DONE ✅''')
| 43
| 1
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Tokenizer tests for the fast (Rust) Bloom tokenizer.

    NOTE(review): several locals below were collapsed onto
    ``__UpperCamelCase`` by obfuscation while later lines read the real names
    (``tokenizer``, ``tokenizer_r``, ``input_ids``, ``sample_data``) —
    confirm against the original transformers test.
    """
    # Bloom has no slow tokenizer, so only the fast class is exercised.
    a__ : List[Any] = None
    a__ : int = BloomTokenizerFast
    a__ : Any = BloomTokenizerFast
    a__ : Optional[Any] = True
    a__ : Optional[int] = False
    a__ : Union[str, Any] = """tokenizer_file"""
    a__ : Dict = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}

    def UpperCamelCase__ ( self) -> List[str]:
        # Download the reference tokenizer once and cache it in tmpdirname.
        super().setUp()
        __UpperCamelCase :int = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''')
        tokenizer.save_pretrained(self.tmpdirname)

    def UpperCamelCase__ ( self , **__lowercase) -> Union[str, Any]:
        # Factory used by the mixin: reload the cached tokenizer with overrides.
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase)

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Round-trip: encode two sentences to known ids, then decode back.
        __UpperCamelCase :Union[str, Any] = self.get_rust_tokenizer()
        __UpperCamelCase :str = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        __UpperCamelCase :Optional[int] = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        __UpperCamelCase :Dict = tokenizer.batch_encode_plus(__lowercase)['''input_ids''']
        self.assertListEqual(__lowercase , __lowercase)
        __UpperCamelCase :Dict = tokenizer.batch_decode(__lowercase)
        self.assertListEqual(__lowercase , __lowercase)

    def UpperCamelCase__ ( self , __lowercase=6) -> Any:
        # Padding behaviour: encoding with max_length must work while a pad
        # token is set, and must raise once the pad token is removed.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                __UpperCamelCase :int = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                __UpperCamelCase :str = '''This is a simple input'''
                __UpperCamelCase :Optional[int] = ['''This is a simple input 1''', '''This is a simple input 2''']
                __UpperCamelCase :Any = ('''This is a simple input''', '''This is a pair''')
                __UpperCamelCase :Optional[Any] = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(__lowercase , max_length=__lowercase)
                    tokenizer_r.encode_plus(__lowercase , max_length=__lowercase)
                    tokenizer_r.batch_encode_plus(__lowercase , max_length=__lowercase)
                    tokenizer_r.encode(__lowercase , max_length=__lowercase)
                    tokenizer_r.batch_encode_plus(__lowercase , max_length=__lowercase)
                except ValueError:
                    self.fail('''Bloom Tokenizer should be able to deal with padding''')
                __UpperCamelCase :str = None  # Hotfixing padding = None
                self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Simple input
                self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Simple input
                self.assertRaises(
                    __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Pair input
                self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Pair input
                self.assertRaises(
                    __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , )

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Multilingual round-trip over one streamed XNLI sample: decoding the
        # encoding must reproduce the original premises exactly.
        __UpperCamelCase :int = self.get_rust_tokenizer()
        __UpperCamelCase :List[str] = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=__lowercase)
        __UpperCamelCase :Optional[int] = next(iter(__lowercase))['''premise''']  # pick up one data
        __UpperCamelCase :Union[str, Any] = list(sample_data.values())
        __UpperCamelCase :Optional[int] = list(map(tokenizer.encode , __lowercase))
        __UpperCamelCase :Union[str, Any] = [tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase) for x in output_tokens]
        self.assertListEqual(__lowercase , __lowercase)

    def UpperCamelCase__ ( self) -> List[str]:
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 43
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Wav2Vec2-style model configuration: convolutional feature extractor, transformer
    encoder, SpecAugment masking, and optional quantizer / adapter / CTC / classifier heads.
    """
    a__ : Union[str, Any] = """wav2vec2"""
    def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
        # NOTE(review): the assignment targets below are machine-obfuscated to `__UpperCamelCase`;
        # reads later in the class (`self.conv_dim`, `self.conv_stride`, ...) expect the originals
        # to have been `self.<param> = <param>` — confirm against the upstream source before use.
        super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
        __UpperCamelCase :Any = hidden_size
        __UpperCamelCase :int = feat_extract_norm
        __UpperCamelCase :Tuple = feat_extract_activation
        __UpperCamelCase :Union[str, Any] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :int = list(__lowercase)
        __UpperCamelCase :List[Any] = conv_bias
        __UpperCamelCase :Optional[int] = num_conv_pos_embeddings
        __UpperCamelCase :Dict = num_conv_pos_embedding_groups
        __UpperCamelCase :Any = len(self.conv_dim)
        __UpperCamelCase :List[str] = num_hidden_layers
        __UpperCamelCase :int = intermediate_size
        __UpperCamelCase :str = hidden_act
        __UpperCamelCase :Any = num_attention_heads
        __UpperCamelCase :int = hidden_dropout
        __UpperCamelCase :Tuple = attention_dropout
        __UpperCamelCase :List[str] = activation_dropout
        __UpperCamelCase :Optional[Any] = feat_proj_dropout
        __UpperCamelCase :Any = final_dropout
        __UpperCamelCase :Any = layerdrop
        __UpperCamelCase :str = layer_norm_eps
        __UpperCamelCase :Optional[Any] = initializer_range
        __UpperCamelCase :List[str] = vocab_size
        __UpperCamelCase :str = do_stable_layer_norm
        __UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
        # The three convolutional specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __UpperCamelCase :List[Any] = apply_spec_augment
        __UpperCamelCase :Tuple = mask_time_prob
        __UpperCamelCase :int = mask_time_length
        __UpperCamelCase :Dict = mask_time_min_masks
        __UpperCamelCase :str = mask_feature_prob
        __UpperCamelCase :List[str] = mask_feature_length
        __UpperCamelCase :Union[str, Any] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        __UpperCamelCase :Optional[Any] = num_codevectors_per_group
        __UpperCamelCase :List[Any] = num_codevector_groups
        __UpperCamelCase :Tuple = contrastive_logits_temperature
        __UpperCamelCase :Optional[int] = feat_quantizer_dropout
        __UpperCamelCase :Optional[int] = num_negatives
        __UpperCamelCase :List[Any] = codevector_dim
        __UpperCamelCase :str = proj_codevector_dim
        __UpperCamelCase :List[str] = diversity_loss_weight
        # ctc loss
        __UpperCamelCase :Tuple = ctc_loss_reduction
        __UpperCamelCase :Tuple = ctc_zero_infinity
        # adapter
        __UpperCamelCase :List[str] = add_adapter
        __UpperCamelCase :Tuple = adapter_kernel_size
        __UpperCamelCase :str = adapter_stride
        __UpperCamelCase :Tuple = num_adapter_layers
        __UpperCamelCase :Tuple = output_hidden_size or hidden_size
        __UpperCamelCase :Optional[Any] = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __UpperCamelCase :Optional[Any] = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        __UpperCamelCase :Optional[int] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :str = xvector_output_dim
    @property
    def UpperCamelCase__ ( self) -> List[str]:
        # Total downsampling factor of the conv feature extractor (product of all strides).
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 43
| 1
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Decoding granularities: character-level, BPE, or wordpiece token streams."""
    a__ : List[str] = """char"""
    a__ : str = """bpe"""
    a__ : Union[str, Any] = """wp"""
__lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Processor bundling a ViT image processor with char/BPE/wordpiece tokenizers and
    combining the three decoding heads by picking the highest-confidence string per sample.
    """
    a__ : List[Any] = ["""image_processor""", """char_tokenizer"""]
    a__ : Optional[Any] = """ViTImageProcessor"""
    a__ : Optional[Any] = """MgpstrTokenizer"""
    def __init__( self , __lowercase=None , __lowercase=None , **__lowercase) -> Any:
        # NOTE(review): assignment targets below are machine-obfuscated (`__UpperCamelCase`);
        # subsequent reads use `kwargs` / `feature_extractor` / `image_processor`, so the
        # original local names appear lost — confirm against the upstream source.
        __UpperCamelCase :Tuple = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , __lowercase , )
            __UpperCamelCase :Any = kwargs.pop('''feature_extractor''')
        __UpperCamelCase :List[str] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        __UpperCamelCase :Optional[Any] = tokenizer
        __UpperCamelCase :Any = AutoTokenizer.from_pretrained('''gpt2''')
        __UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''')
        super().__init__(__lowercase , __lowercase)
    def __call__( self , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase) -> Tuple:
        """Preprocess images and/or text; with both, tokenized text rides along with the pixel inputs."""
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''')
        if images is not None:
            __UpperCamelCase :Union[str, Any] = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase)
        if text is not None:
            __UpperCamelCase :Dict = self.char_tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            __UpperCamelCase :int = encodings['''input_ids''']
            return inputs
    def UpperCamelCase__ ( self , __lowercase) -> Any:
        """Decode (char, bpe, wp) logits and keep, per sample, the string with the best confidence."""
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = sequences
        __UpperCamelCase :Dict = char_preds.size(0)
        __UpperCamelCase , __UpperCamelCase :Optional[Any] = self._decode_helper(__lowercase , '''char''')
        __UpperCamelCase , __UpperCamelCase :Dict = self._decode_helper(__lowercase , '''bpe''')
        __UpperCamelCase , __UpperCamelCase :Optional[int] = self._decode_helper(__lowercase , '''wp''')
        __UpperCamelCase :Optional[int] = []
        __UpperCamelCase :str = []
        for i in range(__lowercase):
            __UpperCamelCase :Optional[int] = [char_scores[i], bpe_scores[i], wp_scores[i]]
            __UpperCamelCase :Any = [char_strs[i], bpe_strs[i], wp_strs[i]]
            # Keep the head whose cumulative token probability is highest.
            __UpperCamelCase :Optional[Any] = scores.index(max(__lowercase))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        __UpperCamelCase :int = {}
        __UpperCamelCase :Dict = final_strs
        __UpperCamelCase :Tuple = final_scores
        __UpperCamelCase :Dict = char_strs
        __UpperCamelCase :List[str] = bpe_strs
        __UpperCamelCase :Union[str, Any] = wp_strs
        return out
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> str:
        """Greedy-decode one head's logits; truncate each string at its EOS marker and
        return the strings with their cumulative-probability confidence scores."""
        if format == DecodeType.CHARACTER:
            __UpperCamelCase :Tuple = self.char_decode
            __UpperCamelCase :Optional[Any] = 1
            __UpperCamelCase :Any = '''[s]'''
        elif format == DecodeType.BPE:
            __UpperCamelCase :int = self.bpe_decode
            __UpperCamelCase :Dict = 2
            __UpperCamelCase :int = '''#'''
        elif format == DecodeType.WORDPIECE:
            __UpperCamelCase :Optional[int] = self.wp_decode
            __UpperCamelCase :List[str] = 102
            __UpperCamelCase :Tuple = '''[SEP]'''
        else:
            raise ValueError(f"""Format {format} is not supported.""")
        __UpperCamelCase , __UpperCamelCase :List[str] = [], []
        __UpperCamelCase :List[Any] = pred_logits.size(0)
        __UpperCamelCase :List[str] = pred_logits.size(1)
        __UpperCamelCase , __UpperCamelCase :List[str] = pred_logits.topk(1 , dim=-1 , largest=__lowercase , sorted=__lowercase)
        # Drop the first position (start token) before decoding.
        __UpperCamelCase :List[Any] = preds_index.view(-1 , __lowercase)[:, 1:]
        __UpperCamelCase :Optional[Any] = decoder(__lowercase)
        __UpperCamelCase , __UpperCamelCase :str = torch.nn.functional.softmax(__lowercase , dim=2).max(dim=2)
        __UpperCamelCase :str = preds_max_prob[:, 1:]
        for index in range(__lowercase):
            __UpperCamelCase :int = preds_str[index].find(__lowercase)
            __UpperCamelCase :Union[str, Any] = preds_str[index][:pred_eos]
            __UpperCamelCase :int = preds_index[index].cpu().tolist()
            __UpperCamelCase :str = pred_index.index(__lowercase) if eos_token in pred_index else -1
            __UpperCamelCase :Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-token max probabilities up to (and including) EOS.
            __UpperCamelCase :str = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(__lowercase)
            conf_scores.append(__lowercase)
        return dec_strs, conf_scores
    def UpperCamelCase__ ( self , __lowercase) -> Any:
        """Batch-decode char-tokenizer ids, stripping spaces between characters."""
        __UpperCamelCase :Optional[int] = [seq.replace(''' ''' , '''''') for seq in self.char_tokenizer.batch_decode(__lowercase)]
        return decode_strs
    def UpperCamelCase__ ( self , __lowercase) -> Optional[int]:
        """Batch-decode BPE-tokenizer ids."""
        return self.bpe_tokenizer.batch_decode(__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> Tuple:
        """Batch-decode wordpiece-tokenizer ids, stripping spaces between pieces."""
        __UpperCamelCase :Optional[int] = [seq.replace(''' ''' , '''''') for seq in self.wp_tokenizer.batch_decode(__lowercase)]
        return decode_strs
| 43
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Image processor that resizes height/width down to the nearest multiple of
    `size_divisor` and optionally rescales pixel values to [0, 1].
    """
    a__ : Optional[Any] = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
        # NOTE(review): assignment targets are machine-obfuscated (`__UpperCamelCase`); the
        # originals were presumably `self.do_resize = do_resize`, etc. — confirm upstream.
        __UpperCamelCase :Optional[int] = do_resize
        __UpperCamelCase :Any = do_rescale
        __UpperCamelCase :str = size_divisor
        __UpperCamelCase :Dict = resample
        super().__init__(**__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        """Resize so both dimensions become exact multiples of `size_divisor` (rounded down)."""
        __UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
        # Rounds the height and width down to the closest multiple of size_divisor
        __UpperCamelCase :List[Any] = height // size_divisor * size_divisor
        __UpperCamelCase :List[str] = width // size_divisor * size_divisor
        __UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
        return image
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
        """Run the full preprocessing pipeline and return a BatchFeature of pixel values."""
        __UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
        __UpperCamelCase :List[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        __UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
        if not valid_images(__lowercase):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        __UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
        if do_resize:
            __UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
        if do_rescale:
            __UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
        __UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
        __UpperCamelCase :int = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase)
| 43
| 1
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowercase , __lowercase , __lowercase = False, False, False
@dataclass
class lowerCamelCase_ :
    """Audio feature type: stores audio as {bytes, path}, decodes to
    {path, array, sampling_rate} via soundfile/librosa, and casts pyarrow storage.
    """
    a__ : Optional[int] = None
    a__ : bool = True
    a__ : bool = True
    a__ : Optional[str] = None
    # Automatically constructed
    a__ : ClassVar[str] = "dict"
    a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    a__ : str = field(default="""Audio""" , init=UpperCAmelCase_ , repr=UpperCAmelCase_ )
    def __call__( self) -> Any:
        # The pyarrow storage type for this feature.
        return self.pa_type
    def UpperCamelCase__ ( self , __lowercase) -> dict:
        """Encode an example (str path, raw bytes, or {array, sampling_rate} dict) into {bytes, path}."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
        if isinstance(__lowercase , __lowercase):
            return {"bytes": None, "path": value}
        elif isinstance(__lowercase , __lowercase):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            __UpperCamelCase :int = BytesIO()
            sf.write(__lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm'''):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''') is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
                if value.get('''bytes'''):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    # NOTE(review): `np.intaa` / `np.floataa` are not real numpy dtype names — they look
                    # like obfuscated int16 / float32 (32_767 is the int16 max); confirm upstream.
                    __UpperCamelCase :Optional[Any] = np.frombuffer(value['''bytes'''] , dtype=np.intaa).astype(np.floataa) / 32_767
                else:
                    __UpperCamelCase :Union[str, Any] = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.floataa) / 32_767
                __UpperCamelCase :Tuple = BytesIO(bytes())
                sf.write(__lowercase , __lowercase , value['''sampling_rate'''] , format='''wav''')
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> dict:
        """Decode {bytes, path} into {path, array, sampling_rate}, resampling / downmixing as configured."""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
        __UpperCamelCase , __UpperCamelCase :List[Any] = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
        __UpperCamelCase :Dict = xsplitext(__lowercase)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
        if file is None:
            # Streaming from the Hub: resolve a per-repo auth token for the download.
            __UpperCamelCase :Any = token_per_repo_id or {}
            __UpperCamelCase :Any = path.split('''::''')[-1]
            try:
                __UpperCamelCase :Optional[Any] = string_to_dict(__lowercase , config.HUB_DATASETS_URL)['''repo_id''']
                __UpperCamelCase :Tuple = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                __UpperCamelCase :str = None
            with xopen(__lowercase , '''rb''' , use_auth_token=__lowercase) as f:
                __UpperCamelCase , __UpperCamelCase :List[str] = sf.read(__lowercase)
        else:
            __UpperCamelCase , __UpperCamelCase :Union[str, Any] = sf.read(__lowercase)
        __UpperCamelCase :List[str] = array.T
        if self.mono:
            __UpperCamelCase :Any = librosa.to_mono(__lowercase)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            __UpperCamelCase :Tuple = librosa.resample(__lowercase , orig_sr=__lowercase , target_sr=self.sampling_rate)
            __UpperCamelCase :Optional[int] = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def UpperCamelCase__ ( self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to plain {bytes, path} value features; only valid when decoding is off."""
        from .features import Value
        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''')
        return {
            "bytes": Value('''binary'''),
            "path": Value('''string'''),
        }
    def UpperCamelCase__ ( self , __lowercase) -> pa.StructArray:
        """Cast arbitrary pyarrow storage (string, binary, or struct) into the {bytes, path} struct."""
        if pa.types.is_string(storage.type):
            __UpperCamelCase :List[str] = pa.array([None] * len(__lowercase) , type=pa.binary())
            __UpperCamelCase :Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            __UpperCamelCase :Union[str, Any] = pa.array([None] * len(__lowercase) , type=pa.string())
            __UpperCamelCase :Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
            __UpperCamelCase :Any = pa.array([Audio().encode_example(__lowercase) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                __UpperCamelCase :List[str] = storage.field('''bytes''')
            else:
                __UpperCamelCase :Union[str, Any] = pa.array([None] * len(__lowercase) , type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                __UpperCamelCase :Tuple = storage.field('''path''')
            else:
                __UpperCamelCase :int = pa.array([None] * len(__lowercase) , type=pa.string())
            __UpperCamelCase :List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        return array_cast(__lowercase , self.pa_type)
    def UpperCamelCase__ ( self , __lowercase) -> pa.StructArray:
        """Embed external audio files into the storage as bytes, keeping only basenames as paths."""
        @no_op_if_value_is_null
        def path_to_bytes(__lowercase):
            with xopen(__lowercase , '''rb''') as f:
                __UpperCamelCase :Any = f.read()
            return bytes_
        __UpperCamelCase :Optional[int] = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        __UpperCamelCase :List[str] = pa.array(
            [os.path.basename(__lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
        __UpperCamelCase :Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
        return array_cast(__lowercase , self.pa_type)
| 43
|
from __future__ import annotations
from PIL import Image
# Define glider example
__lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Return the next Game of Life generation for a rectangular grid.

    SCREAMING_SNAKE_CASE: 2D list of ints (1 = alive, 0 = dead). Not mutated.
    Returns a new 2D list of the same shape.

    Fix: the original body assigned every local to a throwaway name while the
    reads used `cells` / `neighbour_count` / `next_generation` (NameError), and
    appended the whole input grid instead of the computed row.
    """
    cells = SCREAMING_SNAKE_CASE  # reads below use the original name `cells`
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of (i, j); border cells simply have fewer neighbours.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def lowerCamelCase ( cells , frames ):
    """Render `frames` successive Game of Life generations of `cells` as RGB PIL images.

    Fix: the original definition repeated the same obfuscated name for both
    parameters (a SyntaxError) and assigned locals to throwaway names while the
    reads used `images` / `img` / `pixels` / `colour`; names are restored from
    how the body reads them. Calls stay positionally compatible.
    """
    images = []
    for _ in range(frames):
        # Create output image, one pixel per cell (white = dead, black = alive).
        img = Image.new('''RGB''' , (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        images.append(img)
        # NOTE(review): `new_generation` is not defined under that name in this file as shown —
        # the stepping function above was obfuscated to `lowerCamelCase`; realign before running.
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # NOTE(review): `generate_images` and `images` are not defined under these names in this
    # file as shown (the defs above are obfuscated to `lowerCamelCase` and the result is bound
    # to `__lowercase`) — confirm the references before running this as a script.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 43
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """BLOOM model configuration: vocabulary/hidden sizes, layer/head counts, dropouts,
    and the ALiBi-era flags (`pretraining_tp`, `slow_but_exact`).
    """
    a__ : List[Any] = """bloom"""
    a__ : Dict = ["""past_key_values"""]
    a__ : Dict = {
        """num_hidden_layers""": """n_layer""",
        """num_attention_heads""": """n_head""",
    }
    def __init__( self , __lowercase=250_880 , __lowercase=64 , __lowercase=2 , __lowercase=8 , __lowercase=1E-5 , __lowercase=0.02 , __lowercase=True , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=0.0 , __lowercase=0.0 , __lowercase=1 , __lowercase=False , **__lowercase , ) -> Optional[Any]:
        # NOTE(review): assignment targets below are machine-obfuscated (`__UpperCamelCase`);
        # the originals were presumably `self.<param> = <param>` — confirm upstream.
        __UpperCamelCase :Tuple = vocab_size
        # Backward compatibility with n_embed kwarg
        __UpperCamelCase :Union[str, Any] = kwargs.pop('''n_embed''' , __lowercase)
        __UpperCamelCase :Any = hidden_size if n_embed is None else n_embed
        __UpperCamelCase :List[str] = n_layer
        __UpperCamelCase :Optional[int] = n_head
        __UpperCamelCase :Optional[Any] = layer_norm_epsilon
        __UpperCamelCase :Optional[Any] = initializer_range
        __UpperCamelCase :str = use_cache
        __UpperCamelCase :List[Any] = pretraining_tp
        __UpperCamelCase :Tuple = apply_residual_connection_post_layernorm
        __UpperCamelCase :int = hidden_dropout
        __UpperCamelCase :Optional[int] = attention_dropout
        __UpperCamelCase :Dict = bos_token_id
        __UpperCamelCase :List[Any] = eos_token_id
        __UpperCamelCase :int = slow_but_exact
        super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """ONNX export configuration for BLOOM, with support for past key/values
    (BLOOM keeps the dynamic sequence axis of past values on axis 2).
    """
    a__ : Optional[int] = version.parse("""1.12""" )
    def __init__( self , __lowercase , __lowercase = "default" , __lowercase = None , __lowercase = False , ) -> List[Any]:
        super().__init__(__lowercase , task=__lowercase , patching_specs=__lowercase , use_past=__lowercase)
        if not getattr(self._config , '''pad_token_id''' , __lowercase):
            # TODO: how to do that better?
            __UpperCamelCase :List[Any] = 0
    @property
    def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the ONNX inputs (input_ids, optional past, attention_mask)."""
        __UpperCamelCase :Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(__lowercase , direction='''inputs''' , inverted_values_shape=__lowercase)
            __UpperCamelCase :List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            __UpperCamelCase :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def UpperCamelCase__ ( self) -> int:
        # Number of transformer layers.
        return self._config.n_layer
    @property
    def UpperCamelCase__ ( self) -> int:
        # Number of attention heads.
        return self._config.n_head
    @property
    def UpperCamelCase__ ( self) -> float:
        # Absolute tolerance when validating exported ONNX outputs.
        return 1E-3
    def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
        """Build dummy inputs for export, adding zero-filled past key/values when `use_past`."""
        __UpperCamelCase :str = super(__lowercase , self).generate_dummy_inputs(
            __lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase)
        # We need to order the input in the way they appears in the forward()
        __UpperCamelCase :Optional[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
                __UpperCamelCase , __UpperCamelCase :Tuple = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                __UpperCamelCase :List[str] = seqlen + 2
                __UpperCamelCase :Optional[int] = self._config.hidden_size // self.num_attention_heads
                # Keys are (batch*heads, head_dim, past_len); values are (batch*heads, past_len, head_dim).
                __UpperCamelCase :Union[str, Any] = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                __UpperCamelCase :Optional[Any] = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                __UpperCamelCase :Tuple = [
                    (torch.zeros(__lowercase), torch.zeros(__lowercase)) for _ in range(self.num_layers)
                ]
        __UpperCamelCase :str = common_inputs['''attention_mask''']
        if self.use_past:
            __UpperCamelCase :int = ordered_inputs['''attention_mask'''].dtype
            __UpperCamelCase :Any = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase)] , dim=1)
        return ordered_inputs
    @property
    def UpperCamelCase__ ( self) -> int:
        # Default number of ONNX opset to use for the export.
        return 13
| 43
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Rewrite PyTorch-style indexed names (`block.0`) to Flax-style (`block_0`) in a key.

    SCREAMING_SNAKE_CASE: a dotted parameter name, e.g. ``"down_blocks.1.proj.weight"``.
    Returns the key with every ``<word>.<digits>`` fragment joined by an underscore.

    Fix: the original body assigned to throwaway names while the reads used
    `pats` / `key` (NameError) and passed the key itself as the findall pattern.
    """
    key = SCREAMING_SNAKE_CASE  # reads below use the original name `key`
    pats = re.findall(R'''\w+[.]\d+''' , key)
    for pat in pats:
        key = key.replace(pat , '''_'''.join(pat.split('''.''')))
    return key
def lowerCamelCase ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """Rename a PyTorch parameter key (tuple of path parts) to its Flax equivalent
    and reshape the tensor where the layouts differ.

    pt_tuple_key: tuple of name parts, e.g. ``('dense', 'weight')``.
    pt_tensor: the parameter array (numpy-like; must expose `.ndim`, `.T`, `.transpose`).
    random_flax_state_dict: flattened Flax params used to disambiguate targets.
    Returns ``(flax_key_tuple, tensor)``.

    Fix: the original definition repeated one obfuscated name for all three
    parameters (a SyntaxError); names are restored from how the body reads them.
    """
    # LayerNorm-style params: PyTorch `bias`/`weight`/`gamma` map onto Flax `scale`.
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        return pt_tuple_key[:-1] + ('''embedding''',), pt_tensor
    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        return renamed_pt_tuple_key, pt_tensor.transpose(2 , 3 , 1 , 0)
    # linear layer: transpose the weight matrix
    if pt_tuple_key[-1] == "weight":
        return renamed_pt_tuple_key, pt_tensor.T
    # old PyTorch layer norm weight
    if pt_tuple_key[-1] == "gamma":
        return pt_tuple_key[:-1] + ('''weight''',), pt_tensor
    # old PyTorch layer norm bias
    if pt_tuple_key[-1] == "beta":
        return pt_tuple_key[:-1] + ('''bias''',), pt_tensor
    return pt_tuple_key, pt_tensor
def lowerCamelCase ( pt_state_dict , flax_model , init_key=42 ):
    """Convert a PyTorch state dict into Flax parameters for `flax_model`.

    pt_state_dict: mapping of dotted names to PyTorch tensors (must expose `.numpy()`).
    flax_model: a stateless Flax model exposing `init_weights(rng)`.
    init_key: PRNG seed used to initialize the reference Flax params.
    Returns the converted params as a nested (unflattened) dict of jnp arrays.

    Fix: the original definition repeated one obfuscated name for all parameters
    (a SyntaxError); names are restored from how the body reads them.
    """
    # Step 1: work on numpy copies of the PyTorch tensors.
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_state_dict = flatten_dict(flax_model.init_weights(PRNGKey(init_key)))
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    # NOTE(review): `rename_key` / `rename_key_and_reshape_tensor` exist above only under
    # obfuscated names in this file as shown — realign the references before running.
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('''.'''))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 43
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowercase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ :
    """Arguments describing which model/config/tokenizer to fine-tune from.

    NOTE(review): every field below is named ``a__`` — in a dataclass each
    redeclaration replaces the previous one, so only the last field survives.
    The defaults reference ``UpperCAmelCase_``, which is not defined in this
    chunk (upstream these defaults are ``None``) — confirm it resolves at
    class-creation time.
    """
    # Intended: model_name_or_path (required) — hub id or local path.
    a__ : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # Intended: config_name — falls back to the model path when unset.
    a__ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    # Intended: task_type — which TokenClassificationTask subclass to use.
    a__ : Optional[str] = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    # Intended: tokenizer_name — falls back to the model path when unset.
    a__ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    # Intended: use_fast — opt into the fast (Rust) tokenizer implementation.
    a__ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    # Intended: cache_dir — where downloaded pretrained artifacts are stored.
    a__ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCamelCase_ :
    """Arguments describing the input data for CoNLL-style token classification.

    NOTE(review): as in the model-arguments dataclass above, all fields share
    the name ``a__`` (only the last survives) and optional defaults reference
    the undefined ``UpperCAmelCase_`` (upstream: ``None``) — confirm.
    """
    # Intended: data_dir (required) — directory with the .txt CoNLL files.
    a__ : str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    # Intended: labels — optional file listing all labels; CoNLL-2003 set otherwise.
    a__ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
    # Intended: max_seq_length — post-tokenization truncation/padding length.
    a__ : int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # Intended: overwrite_cache — rebuild cached feature files.
    a__ : bool = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCamelCase ( ):
    """Fine-tune and evaluate a token-classification model (NER/POS/chunking).

    End-to-end driver: parses model/data/training arguments, resolves the task
    class from a local ``tasks`` module, builds config/tokenizer/model, trains
    with ``Trainer``, evaluates, and optionally writes test predictions.

    NOTE(review): as obfuscated this function cannot run — most call results
    are bound to the throwaway name ``__UpperCamelCase`` while later lines
    read the intended names (``parser``, ``training_args``, ``model``, ...),
    which are never defined. Comments document the intended data flow only.
    """
    # Parse CLI args (or a single JSON file) into the three dataclasses above.
    __UpperCamelCase :Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ''' --overwrite_output_dir to overcome.''' )
    # Resolve the concrete TokenClassificationTask subclass named by --task_type.
    __UpperCamelCase :List[str] = import_module('''tasks''' )
    try:
        __UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , model_args.task_type )
        __UpperCamelCase :TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    # NOTE(review): ``fpaa`` looks like a mangled ``fp16`` — confirm upstream.
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE )
    # Set seed (reproducible weight init / data shuffling)
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task: label list, id->label map, and label count.
    __UpperCamelCase :Optional[Any] = token_classification_task.get_labels(data_args.labels )
    __UpperCamelCase :Dict[int, str] = dict(enumerate(SCREAMING_SNAKE_CASE ) )
    __UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    # NOTE(review): ``idalabel``/``labelaid`` look like mangled
    # ``id2label``/``label2id`` keyword arguments — confirm against upstream.
    __UpperCamelCase :int = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
    __UpperCamelCase :str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    __UpperCamelCase :int = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
    # Get datasets (built only for the phases that are enabled)
    __UpperCamelCase :List[str] = (
        TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    __UpperCamelCase :Optional[Any] = (
        TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]:
        # Convert (batch, seq, num_labels) logits + gold label ids into
        # per-sentence label-string lists, skipping ignore-index positions.
        # NOTE(review): duplicate parameter names here are a SyntaxError as written.
        __UpperCamelCase :Dict = np.argmax(SCREAMING_SNAKE_CASE , axis=2 )
        __UpperCamelCase , __UpperCamelCase :Any = preds.shape
        __UpperCamelCase :List[str] = [[] for _ in range(SCREAMING_SNAKE_CASE )]
        __UpperCamelCase :Optional[Any] = [[] for _ in range(SCREAMING_SNAKE_CASE )]
        for i in range(SCREAMING_SNAKE_CASE ):
            for j in range(SCREAMING_SNAKE_CASE ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(SCREAMING_SNAKE_CASE ) -> Dict:
        # Entity-level seqeval metrics over the aligned prediction/label lists.
        # NOTE(review): ``fa_score`` looks like a mangled ``f1_score`` (seqeval
        # exposes no ``fa_score``) — the import above would fail; confirm.
        __UpperCamelCase , __UpperCamelCase :str = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
            "precision": precision_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
            "recall": recall_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
            "f1": fa_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
        }

    # Data collator: pad to multiples of 8 only when fp16 tensor cores benefit.
    __UpperCamelCase :str = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    __UpperCamelCase :List[Any] = Trainer(
        model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    __UpperCamelCase :Any = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        __UpperCamelCase :Dict = trainer.evaluate()
        __UpperCamelCase :str = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        # Only the main process writes result files in distributed runs.
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(SCREAMING_SNAKE_CASE )
    # Predict
    if training_args.do_predict:
        __UpperCamelCase :str = TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = trainer.predict(SCREAMING_SNAKE_CASE )
        __UpperCamelCase , __UpperCamelCase :Dict = align_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __UpperCamelCase :Tuple = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        __UpperCamelCase :Any = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    return results
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """TPU/`xla_spawn` entry point: the (required) process-index argument is
    ignored and the regular training entry point is run.

    NOTE(review): ``main`` is not defined under that name in this chunk — the
    training driver above is bound to ``lowerCamelCase``; confirm the intended
    binding before running under xla_spawn.
    """
    main()


if __name__ == "__main__":
    # NOTE(review): same undefined-``main`` concern as above.
    main()
| 43
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to read weights from.
        albert_config_file: JSON config describing the ALBERT architecture.
        pytorch_dump_path: where to ``torch.save`` the converted state dict.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError) and bound locals to a throwaway name — restored here; the
    call site passes positionally, so the interface is compatible.
    """
    # Initialise a fresh PyTorch model from the JSON architecture description.
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI wrapper around the TF->PyTorch ALBERT conversion routine above.
    # (Fixed: the parser/args results were bound to a throwaway name, and the
    # final call referenced the undefined ``convert_tf_checkpoint_to_pytorch``;
    # the conversion function is bound to ``lowerCamelCase`` in this module.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--albert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained ALBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    lowerCamelCase(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the MBART tokenizer.
# NOTE(review): every constant below is bound to the same name ``__lowercase``
# — each assignment overwrites the previous one, so only the final value (the
# language-code list) survives, and the class attributes that read
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP etc. find those names
# undefined. Upstream each line binds a distinct UPPER_SNAKE constant.
__lowercase = logging.get_logger(__name__)
# SentencePiece word-boundary marker (upstream: SPIECE_UNDERLINE).
__lowercase = '''▁'''
# Expected vocab file name inside a checkpoint (upstream: VOCAB_FILES_NAMES).
__lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
# Download URLs per pretrained checkpoint (upstream: PRETRAINED_VOCAB_FILES_MAP).
__lowercase = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}
# Max input sizes per checkpoint (upstream: PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
__lowercase = {
    '''facebook/mbart-large-en-ro''': 1024,
    '''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
# The 25 language codes mbart-large-cc25 was trained with (upstream: FAIRSEQ_LANGUAGE_CODES).
__lowercase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """MBART tokenizer (SentencePiece BPE) with fairseq-style language codes.

    Wraps a ``sentencepiece`` model, re-aligns the first four special-token
    ids to the fairseq convention, and appends one id per supported language
    code. ``prefix_tokens``/``suffix_tokens`` implement MBART's
    ``X </s> <lang_code>`` sequence format for the active source/target
    language.

    NOTE(review): this obfuscated version has class-level defects that make it
    undefinable/unrunnable as written:
    * every method except the dunders is named ``UpperCamelCase__`` — each
      ``def`` overwrites the previous one, so only the last survives;
    * ``@src_lang.setter`` references an undefined name ``src_lang`` (the
      property above it is also called ``UpperCamelCase__``), raising
      NameError when the class body executes;
    * ``__init__`` declares many identical ``__lowercase`` parameter names
      (a SyntaxError).
    The comments below document the intended behavior of each method.
    """
    # Intended upstream attribute names: vocab_files_names,
    # max_model_input_sizes, pretrained_vocab_files_map, model_input_names,
    # prefix_tokens, suffix_tokens. The referenced module constants are all
    # shadowed by ``__lowercase`` reassignment above (see NOTE there).
    a__ : int = VOCAB_FILES_NAMES
    a__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    a__ : Dict = ["""input_ids""", """attention_mask"""]
    a__ : List[int] = []
    a__ : List[int] = []

    # Load the SentencePiece model, build the fairseq-aligned vocab maps, add
    # language-code tokens, and activate the source language's special tokens.
    # NOTE(review): the duplicate ``__lowercase`` parameter names are a
    # SyntaxError; upstream they are vocab_file, bos/eos/sep/cls/unk/pad/mask
    # tokens, tokenizer_file, src_lang, tgt_lang, sp_model_kwargs, ...
    def __init__( self , __lowercase , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase = None , __lowercase=None , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCamelCase :Optional[int] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else mask_token
        __UpperCamelCase :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , tokenizer_file=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
        __UpperCamelCase :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(__lowercase))
        __UpperCamelCase :Dict = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        __UpperCamelCase :List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __UpperCamelCase :Tuple = 1
        __UpperCamelCase :List[Any] = len(self.sp_model)
        # One extra id per language code, appended after the spm vocabulary.
        __UpperCamelCase :Optional[int] = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowercase)
        }
        __UpperCamelCase :Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
        __UpperCamelCase :int = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        __UpperCamelCase :Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        __UpperCamelCase :Optional[int] = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens])
        # Default to English source when none was given.
        __UpperCamelCase :List[str] = src_lang if src_lang is not None else '''en_XX'''
        __UpperCamelCase :Dict = self.lang_code_to_id[self._src_lang]
        __UpperCamelCase :List[Any] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    # Pickling support: the SentencePieceProcessor itself is not picklable, so
    # serialize its model proto instead.
    def __getstate__( self) -> Tuple:
        __UpperCamelCase :Dict = self.__dict__.copy()
        __UpperCamelCase :Union[str, Any] = None
        __UpperCamelCase :Tuple = self.sp_model.serialized_model_proto()
        return state

    # Rebuild the SentencePieceProcessor from the serialized proto on unpickle.
    def __setstate__( self , __lowercase) -> Any:
        __UpperCamelCase :Optional[Any] = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            __UpperCamelCase :str = {}
        __UpperCamelCase :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    # Intended: ``vocab_size`` property — spm vocab + language codes + offset.
    @property
    def UpperCamelCase__ ( self) -> Optional[Any]:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token

    # Intended: ``src_lang`` property getter.
    @property
    def UpperCamelCase__ ( self) -> str:
        return self._src_lang

    # Intended: ``src_lang`` setter — also refreshes the special tokens.
    # NOTE(review): ``src_lang`` is undefined here (the getter above is named
    # ``UpperCamelCase__``), so this decorator raises NameError as written.
    @src_lang.setter
    def UpperCamelCase__ ( self , __lowercase) -> None:
        __UpperCamelCase :List[str] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    # Intended: ``get_special_tokens_mask`` — 1 for special tokens, 0 for
    # sequence tokens, honoring the current prefix/suffix layout.
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
        __UpperCamelCase :Optional[int] = [1] * len(self.prefix_tokens)
        __UpperCamelCase :List[Any] = [1] * len(self.suffix_tokens)
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__lowercase)) + suffix_ones
        return prefix_ones + ([0] * len(__lowercase)) + ([0] * len(__lowercase)) + suffix_ones

    # Intended: ``build_inputs_with_special_tokens`` — wrap ids in the MBART
    # prefix/suffix (``X </s> <lang_code>``).
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # Intended: ``create_token_type_ids_from_sequences`` — MBART uses no token
    # types, so the mask is all zeros.
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
        __UpperCamelCase :Dict = [self.sep_token_id]
        __UpperCamelCase :Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    # Intended: ``_build_translation_inputs`` — tokenize for generation and
    # attach the forced BOS id of the target language.
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase) -> Optional[Any]:
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        __UpperCamelCase :List[Any] = src_lang
        __UpperCamelCase :Dict = self(__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase)
        __UpperCamelCase :str = self.convert_tokens_to_ids(__lowercase)
        __UpperCamelCase :List[Any] = tgt_lang_id
        return inputs

    # Intended: ``get_vocab`` — full token->id map including added tokens.
    def UpperCamelCase__ ( self) -> Tuple:
        __UpperCamelCase :Optional[int] = {self.convert_ids_to_tokens(__lowercase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    # Intended: ``_tokenize`` — delegate to SentencePiece, returning pieces.
    def UpperCamelCase__ ( self , __lowercase) -> List[str]:
        return self.sp_model.encode(__lowercase , out_type=__lowercase)

    # Intended: ``_convert_token_to_id`` — fairseq specials first, then spm
    # id shifted by the fairseq offset.
    def UpperCamelCase__ ( self , __lowercase) -> List[str]:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __UpperCamelCase :Union[str, Any] = self.sp_model.PieceToId(__lowercase)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    # Intended: ``_convert_id_to_token`` — inverse of the mapping above.
    def UpperCamelCase__ ( self , __lowercase) -> Any:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    # Intended: ``convert_tokens_to_string`` — join pieces and turn the
    # SentencePiece underline marker back into spaces.
    # NOTE(review): ``.replace(__lowercase , ...)`` replaces the *parameter*
    # (the token list) instead of the SPIECE_UNDERLINE character '▁' — this is
    # a TypeError as written; upstream replaces SPIECE_UNDERLINE.
    def UpperCamelCase__ ( self , __lowercase) -> List[str]:
        __UpperCamelCase :List[Any] = ''''''.join(__lowercase).replace(__lowercase , ''' ''').strip()
        return out_string

    # Intended: ``save_vocabulary`` — copy (or re-serialize) the spm model
    # into ``save_directory`` and return the written path.
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        if not os.path.isdir(__lowercase):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        __UpperCamelCase :Any = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , __lowercase)
        elif not os.path.isfile(self.vocab_file):
            with open(__lowercase , '''wb''') as fi:
                __UpperCamelCase :List[str] = self.sp_model.serialized_model_proto()
                fi.write(__lowercase)
        return (out_vocab_file,)

    # Intended: ``prepare_seq2seq_batch`` — record src/tgt languages, then
    # defer to the base implementation.
    def UpperCamelCase__ ( self , __lowercase , __lowercase = "en_XX" , __lowercase = None , __lowercase = "ro_RO" , **__lowercase , ) -> BatchEncoding:
        __UpperCamelCase :Optional[int] = src_lang
        __UpperCamelCase :str = tgt_lang
        return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase)

    # Intended: ``_switch_to_input_mode`` — activate source-language specials.
    def UpperCamelCase__ ( self) -> List[str]:
        return self.set_src_lang_special_tokens(self.src_lang)

    # Intended: ``_switch_to_target_mode`` — activate target-language specials.
    def UpperCamelCase__ ( self) -> Tuple:
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    # Intended: ``set_src_lang_special_tokens`` — no prefix, suffix is
    # ``[eos, src_lang_code]``.
    def UpperCamelCase__ ( self , __lowercase) -> None:
        __UpperCamelCase :Optional[Any] = self.lang_code_to_id[src_lang]
        __UpperCamelCase :Optional[Any] = []
        __UpperCamelCase :Tuple = [self.eos_token_id, self.cur_lang_code]

    # Intended: ``set_tgt_lang_special_tokens`` — no prefix, suffix is
    # ``[eos, tgt_lang_code]``.
    def UpperCamelCase__ ( self , __lowercase) -> None:
        __UpperCamelCase :str = self.lang_code_to_id[lang]
        __UpperCamelCase :Any = []
        __UpperCamelCase :Dict = [self.eos_token_id, self.cur_lang_code]
| 43
|
import math
import qiskit
def lowerCamelCase(input_a=1, input_b=1, carry_in=1):
    """Build and simulate a quantum full adder on the Aer simulator.

    Each input may be 0, 1 or 2, where 2 places the corresponding qubit in
    superposition via a Hadamard gate.

    Args:
        input_a: first addend qubit value (0, 1 or 2).
        input_b: second addend qubit value (0, 1 or 2).
        carry_in: incoming carry qubit value (0, 1 or 2).

    Returns:
        Measurement counts from 1000 shots: a dict mapping 2-bit result
        strings (carry-out, sum) to their observed frequency.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError) and garbled the validation expressions; restored here to the
    standard quantum-full-adder form — call sites pass positionally.
    """
    # Reject strings before the numeric comparisons below can misbehave.
    if (
        isinstance(input_a, str)
        or isinstance(input_b, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('''inputs must be integers.''')
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')
    # build registers: 3 input qubits + 1 ancilla for the carry-out,
    # and 2 classical bits for the (sum, carry-out) measurement.
    quantum_register = qiskit.QuantumRegister(4, '''qr''')
    classical_register = qiskit.ClassicalRegister(2, '''cr''')
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # (Fixed: the original referenced the undefined name ``quantum_full_adder``;
    # the adder above is bound to ``lowerCamelCase`` in this module.)
    print(f'Total sum count for state is: {lowerCamelCase(1, 1, 1)}')
| 43
| 1
|
def lowerCamelCase(input_str):
    """Compute the Z-array of ``input_str`` in O(n).

    ``z_result[i]`` is the length of the longest substring starting at ``i``
    that is also a prefix of the string (``z_result[0]`` is left at 0 by the
    convention of this implementation, since the loop starts at 1).

    NOTE(review): the obfuscated original bound every local to a throwaway
    name while reading ``z_result``/``left_pointer``/``right_pointer``
    (NameError) — restored here; the sibling ``go_next`` helper is inlined so
    this function stands alone.
    """
    z_result = [0 for _ in range(len(input_str))]
    # [left_pointer, right_pointer] is the right-most prefix-match interval
    # (Z-box) discovered so far.
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            # reuse previously computed information, clipped at the box edge
            z_result[i] = min(right_pointer - i + 1, z_result[i - left_pointer])
        # extend the match character by character past what is already known
        while (
            i + z_result[i] < len(input_str)
            and input_str[z_result[i]] == input_str[i + z_result[i]]
        ):
            z_result[i] += 1
        # if new index's result gives us a larger right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def lowerCamelCase(i, z_result, s):
    """Return True while the prefix match anchored at position ``i`` of ``s``
    can be extended by one more character (step predicate for the Z-function
    main loop).

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); restored to the upstream order ``(i, z_result, s)`` —
    the caller passes positionally.
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__UpperCamelCase :Tuple = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(SCREAMING_SNAKE_CASE ):
answer += 1
return answer
if __name__ == "__main__":
    # Running this module directly executes its doctests (a no-op unless
    # docstrings with ``>>>`` examples are present).
    import doctest
    doctest.testmod()
| 43
|
import random
def lowerCamelCase(a, left_index, right_index):
    """Partition ``a[left_index:right_index]`` in place around the pivot
    ``a[left_index]`` and return the pivot's final index.

    Elements strictly smaller than the pivot end up before it; everything
    else after it.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError) and bound locals to a throwaway name — restored here; the
    quicksort caller passes positionally.
    """
    pivot = a[left_index]
    # ``boundary`` is one past the region of elements known to be < pivot.
    boundary = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[boundary], a[j] = a[j], a[boundary]
            boundary += 1
    # drop the pivot between the two partitions
    a[left_index], a[boundary - 1] = a[boundary - 1], a[left_index]
    return boundary - 1
def _partition_around_first(a, left, right):
    """Partition helper (local so this block is self-contained): partitions
    ``a[left:right]`` in place around ``a[left]`` and returns the pivot's
    final index."""
    pivot = a[left]
    boundary = left + 1
    for j in range(left + 1, right):
        if a[j] < pivot:
            a[boundary], a[j] = a[j], a[boundary]
            boundary += 1
    a[left], a[boundary - 1] = a[boundary - 1], a[left]
    return boundary - 1


def _quick_sort_random(a, left, right):
    """Recursive randomised-quicksort core over ``a[left:right]`` (in place).

    Kept under a private name so recursion survives even though the public
    name ``lowerCamelCase`` is rebound by later definitions in this module.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = _partition_around_first(a, left, right)
        # recursive quicksort to the left of the pivot point
        _quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        _quick_sort_random(a, pivot_index + 1, right)


def lowerCamelCase(a, left, right):
    """Sort ``a[left:right]`` in place with randomised quicksort.

    NOTE(review): the original bound locals to a throwaway name and recursed
    via the undefined name ``quick_sort_random`` (NameError) — restored here
    with private helpers; the public signature is unchanged.
    """
    _quick_sort_random(a, left, right)
def lowerCamelCase():
    """Read comma-separated integers from stdin, quicksort them in place and
    print the sorted list.

    NOTE(review): this relies on a module-level ``quick_sort_random`` which is
    not defined under that name in this chunk (the sort above is bound to
    ``lowerCamelCase`` and then shadowed) — confirm the intended binding
    before running as a script.
    """
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    # (Fixed: the original called the undefined name ``main``; the entry point
    # above is bound to ``lowerCamelCase``.)
    lowerCamelCase()
| 43
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowerCamelCase(config, base_model=False):
    """Build the (old_name, new_name) pairs mapping timm/DINO ViT parameter
    names onto Hugging Face ViT parameter names.

    Args:
        config: object exposing ``num_hidden_layers`` (e.g. a ViTConfig).
        base_model: when True, target a bare ``ViTModel`` — the leading
            ``vit.`` prefix is stripped and no classification head is mapped.

    Returns:
        List of ``(source_key, target_key)`` tuples.

    NOTE(review): the original bound the accumulator to a throwaway name while
    appending to the undefined ``rename_keys`` (NameError) — restored here.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight"""))
        rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias"""))
        rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight"""))
        rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias"""))
        rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight"""))
        rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias"""))
        rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight"""))
        rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias"""))
        rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight"""))
        rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias"""))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''vit.embeddings.cls_token'''),
            ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''vit.embeddings.position_embeddings'''),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])
    return rename_keys
def lowerCamelCase(state_dict, config, base_model=False):
    """Split each timm/DINO fused ``qkv`` projection into separate HF
    query/key/value entries, mutating ``state_dict`` in place.

    Args:
        state_dict: checkpoint dict; ``blocks.{i}.attn.qkv.*`` entries are
            popped and replaced by per-head-projection entries.
        config: object exposing ``num_hidden_layers`` and ``hidden_size``.
        base_model: when True, omit the leading ``vit.`` prefix (bare ViTModel).

    NOTE(review): the obfuscated original lost the assignment targets; the
    HF key names below are restored from the upstream DINO->ViT conversion
    script — confirm against that script.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict:
        # the fused matrix is stacked as [q; k; v] along dim 0.
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase(state_dict):
    """Drop the timm classification-head weights from ``state_dict`` in place
    (used when converting to a headless base ``ViTModel``).

    NOTE(review): restored from the obfuscated original, which bound the key
    list to a throwaway name while iterating the undefined ``ignore_keys``.
    """
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        # pop with a default so already-absent keys are silently ignored
        state_dict.pop(k, None)
def lowerCamelCase(dct, old, new):
    """Rename one state-dict entry in place: move ``dct[old]`` to ``dct[new]``.

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); the conversion loop passes positionally, so the restored
    ``(dct, old, new)`` order is compatible.
    """
    val = dct.pop(old)
    dct[new] = val
def lowerCamelCase():
    """Download the standard COCO cats test image and return it as a PIL image.

    Requires network access; used only for sanity-checking converted-model
    outputs.

    NOTE(review): restored from the obfuscated original, which bound the URL
    to a throwaway name and lost the ``stream=True`` argument.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # stream=True lets PIL read straight from the response's raw byte stream
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Convert a DINO ViT checkpoint from torch hub to the HF ViT format and save it.

    Downloads the original model, remaps its state dict (key renames + qkv split),
    sanity-checks the HF model's outputs against the original on a test image, and
    writes the model plus image processor to ``pytorch_dump_folder_path``.

    Args:
        model_name: DINO hub model name (e.g. ``dino_vitb16``, ``dino_vits8``).
        pytorch_dump_folder_path: output directory for the converted model.
        base_model: convert only the backbone (no classification head) when True.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size: hub names ending in "8" use 8x8 patches
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture: the "small" variants shrink the default config
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        # the original backbone returns only the CLS-token embedding
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point for the DINO -> HF ViT conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''dino_vitb16''',
        type=str,
        help='''Name of the model trained with DINO you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--base_model''',
        action='''store_true''',
        help='''Whether to only convert the base model (no projection head weights).''',
    )
    # Convert only the backbone by default (matches the DINO release).
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 43
|
def lowerCamelCase(numerator=1, digit=1_000):
    """Project Euler 26: denominator <= ``digit`` whose 1/d has the longest recurring cycle.

    Cycle length is detected via long division: once a remainder repeats, the decimal
    digits repeat, and the number of distinct remainders seen is the cycle length.

    Args:
        numerator: numerator of the fraction (1 for the classic problem).
        digit: inclusive upper bound for the denominator (also caps division steps).

    Returns:
        The denominator with the longest recurring decimal cycle.
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # remainder repeated -> cycle found; record it if it's the longest so far
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
# Run this module's doctests when executed directly; importing it stays silent.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 43
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint``: construction validation and stepwise updates.

    NOTE(review): local names in this file appear machine-mangled — ``__lowercase`` is
    referenced where distinct identifiers (e.g. ``dc``, ``token_ids``) are expected, and
    repeated assignments all target ``__UpperCamelCase``. The assertions document the
    intended behavior; confirm identifiers against the original transformers test.
    """
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        """Constraint accepts nested lists of ints and rejects tensor inputs."""
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __UpperCamelCase :str = [[1, 2, 4], [1, 2, 3, 4]]
        __UpperCamelCase :Dict = DisjunctiveConstraint(__lowercase)
        self.assertTrue(isinstance(dc.token_ids , __lowercase))
        # tensors (single or list of) must be rejected at construction time
        with self.assertRaises(__lowercase):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(__lowercase):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def UpperCamelCase__ ( self) -> List[Any]:
        """One branch being a strict prefix of another must be rejected."""
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __UpperCamelCase :Dict = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowercase):
            DisjunctiveConstraint(__lowercase)  # fails here
    def UpperCamelCase__ ( self) -> List[str]:
        """Stepping 1, 2, 3 completes the [1, 2, 3] branch."""
        __UpperCamelCase :Union[str, Any] = [[1, 2, 3], [1, 2, 4]]
        __UpperCamelCase :Any = DisjunctiveConstraint(__lowercase)
        # dc.update returns (stepped, completed, reset) after each token
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = dc.update(1)
        __UpperCamelCase :Union[str, Any] = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = dc.update(2)
        __UpperCamelCase :Tuple = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = dc.update(3)
        __UpperCamelCase :List[Any] = stepped is True and completed is True and reset is False
        self.assertTrue(__lowercase)
        self.assertTrue(dc.completed) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def UpperCamelCase__ ( self) -> str:
        """Longer branches complete correctly, and reset() restarts progress tracking."""
        __UpperCamelCase :Dict = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __UpperCamelCase :List[Any] = DisjunctiveConstraint(__lowercase)
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = dc.update(5)
        self.assertTrue(dc.completed) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        # after reset, progress starts over and remaining() counts down again
        dc.reset()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = dc.update(5)
        self.assertTrue(dc.completed) # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 43
|
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR training data into RAG evaluation files.

    Reads ``--src_path`` (a DPR biencoder JSON file), writes one question per line to
    ``--evaluation_set`` and the tab-joined positive-context titles for each record to
    ``--gold_data_path``.

    Note: restored as ``main`` to match the ``if __name__ == "__main__": main()`` guard
    directly below this function in the file.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''', type=str, default='''biencoder-nq-dev.json''', help='''Path to raw DPR training data''', )
    parser.add_argument(
        '''--evaluation_set''', type=str, help='''where to store parsed evaluation_set file''', )
    parser.add_argument(
        '''--gold_data_path''', type=str, help='''where to store parsed gold_data_path file''', )
    args = parser.parse_args()
    with open(args.src_path, '''r''') as src_file, open(args.evaluation_set, '''w''') as eval_file, open(
        args.gold_data_path, '''w''') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''')
            gold_file.write('''\t'''.join(contexts) + '''\n''')
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): as written, `main` is not defined in this file — the parsing
    # function above carries a different (mangled) name; confirm the intended
    # entry-point name before running.
    main()
| 43
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
    """Helper/tester for the ConditionalDETR image processor tests below.

    Holds the processor configuration and computes the expected output resolution
    for shortest-edge resizing.

    NOTE(review): this class looks machine-mangled — `__init__` repeats the parameter
    name ``__lowercase`` (a SyntaxError) and assignments target ``__UpperCamelCase``
    while reads use the real attribute names (``parent``, ``batch_size``, ...).
    Confirm identifiers against the original transformers test file.
    """
    def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=30 , __lowercase=400 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=[0.5, 0.5, 0.5] , __lowercase=[0.5, 0.5, 0.5] , __lowercase=True , __lowercase=1 / 255 , __lowercase=True , ) -> List[str]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __UpperCamelCase :Any = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
        __UpperCamelCase :List[Any] = parent
        __UpperCamelCase :Tuple = batch_size
        __UpperCamelCase :int = num_channels
        __UpperCamelCase :Union[str, Any] = min_resolution
        __UpperCamelCase :Any = max_resolution
        __UpperCamelCase :Tuple = do_resize
        __UpperCamelCase :int = size
        __UpperCamelCase :int = do_normalize
        __UpperCamelCase :Dict = image_mean
        __UpperCamelCase :List[Any] = image_std
        __UpperCamelCase :Any = do_rescale
        __UpperCamelCase :List[Any] = rescale_factor
        __UpperCamelCase :List[Any] = do_pad
    def UpperCamelCase__ ( self) -> str:
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def UpperCamelCase__ ( self , __lowercase , __lowercase=False) -> Tuple:
        """Compute the (height, width) the processor should produce for the given inputs.

        Mirrors shortest-edge resizing: the shorter side is scaled to
        ``size["shortest_edge"]`` and the longer side keeps the aspect ratio.
        For batches, returns the max height/width across the batch (padding target).
        """
        if not batched:
            __UpperCamelCase :Optional[int] = image_inputs[0]
            if isinstance(__lowercase , Image.Image):
                # PIL images report (width, height)
                __UpperCamelCase , __UpperCamelCase :int = image.size
            else:
                # tensors/arrays are channels-first: shape[1] = height, shape[2] = width
                __UpperCamelCase , __UpperCamelCase :str = image.shape[1], image.shape[2]
            if w < h:
                __UpperCamelCase :List[str] = int(self.size['''shortest_edge'''] * h / w)
                __UpperCamelCase :str = self.size['''shortest_edge''']
            elif w > h:
                __UpperCamelCase :List[Any] = self.size['''shortest_edge''']
                __UpperCamelCase :Optional[int] = int(self.size['''shortest_edge'''] * w / h)
            else:
                __UpperCamelCase :Union[str, Any] = self.size['''shortest_edge''']
                __UpperCamelCase :Union[str, Any] = self.size['''shortest_edge''']
        else:
            __UpperCamelCase :str = []
            for image in image_inputs:
                __UpperCamelCase , __UpperCamelCase :Any = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # batch output is padded to the largest height and width seen
            __UpperCamelCase :Tuple = max(__lowercase , key=lambda __lowercase: item[0])[0]
            __UpperCamelCase :List[Any] = max(__lowercase , key=lambda __lowercase: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Tests for the ConditionalDETR image processor.

    Covers property/config round-trips, batched and unbatched preprocessing for
    PIL / numpy / torch inputs, and (slow) COCO detection + panoptic annotation
    encoding against reference values.

    NOTE(review): local names look machine-mangled — ``__lowercase`` appears where
    distinct identifiers (``image_processor``, ``encoded_images``, expected flags)
    are intended. Confirm against the original transformers test file.
    """
    a__ : Tuple = ConditionalDetrImageProcessor if is_vision_available() else None
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        """setUp: build the shared tester supplying config dicts and expected shapes."""
        __UpperCamelCase :Optional[Any] = ConditionalDetrImageProcessingTester(self)
    @property
    def UpperCamelCase__ ( self) -> str:
        """Image-processor kwargs used to instantiate the class under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase__ ( self) -> Optional[int]:
        """The processor exposes the expected configuration attributes."""
        __UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__lowercase , '''image_mean'''))
        self.assertTrue(hasattr(__lowercase , '''image_std'''))
        self.assertTrue(hasattr(__lowercase , '''do_normalize'''))
        self.assertTrue(hasattr(__lowercase , '''do_resize'''))
        self.assertTrue(hasattr(__lowercase , '''size'''))
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """from_dict honors defaults and legacy ``max_size``/``pad_and_return_pixel_mask`` kwargs."""
        __UpperCamelCase :int = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
        self.assertEqual(image_processor.do_pad , __lowercase)
        __UpperCamelCase :str = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowercase)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
        self.assertEqual(image_processor.do_pad , __lowercase)
    def UpperCamelCase__ ( self) -> str:
        """Intentionally skipped (placeholder in the common test mixin)."""
        pass
    def UpperCamelCase__ ( self) -> List[str]:
        """Preprocessing PIL images yields the expected tensor shapes."""
        # Initialize image_processing
        __UpperCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __UpperCamelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase)
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image)
        # Test not batched input
        __UpperCamelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        __UpperCamelCase , __UpperCamelCase :str = self.image_processor_tester.get_expected_values(__lowercase)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase , __UpperCamelCase :Dict = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase)
        __UpperCamelCase :List[str] = image_processing(__lowercase , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCamelCase__ ( self) -> List[Any]:
        """Preprocessing numpy arrays yields the expected tensor shapes."""
        # Initialize image_processing
        __UpperCamelCase :str = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __UpperCamelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase)
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray)
        # Test not batched input
        __UpperCamelCase :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        __UpperCamelCase , __UpperCamelCase :List[Any] = self.image_processor_tester.get_expected_values(__lowercase)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase :Dict = image_processing(__lowercase , return_tensors='''pt''').pixel_values
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Preprocessing torch tensors yields the expected tensor shapes."""
        # Initialize image_processing
        __UpperCamelCase :Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase)
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor)
        # Test not batched input
        __UpperCamelCase :Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        __UpperCamelCase , __UpperCamelCase :Any = self.image_processor_tester.get_expected_values(__lowercase)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase :Tuple = image_processing(__lowercase , return_tensors='''pt''').pixel_values
        __UpperCamelCase , __UpperCamelCase :List[Any] = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def UpperCamelCase__ ( self) -> Any:
        """COCO detection annotations are encoded into the expected target dict (slow)."""
        # prepare image and target
        __UpperCamelCase :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
            __UpperCamelCase :int = json.loads(f.read())
        __UpperCamelCase :int = {'''image_id''': 39_769, '''annotations''': target}
        # encode them
        __UpperCamelCase :int = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''')
        __UpperCamelCase :Optional[int] = image_processing(images=__lowercase , annotations=__lowercase , return_tensors='''pt''')
        # verify pixel values
        __UpperCamelCase :Union[str, Any] = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['''pixel_values'''].shape , __lowercase)
        __UpperCamelCase :Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowercase , atol=1E-4))
        # verify area
        __UpperCamelCase :str = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowercase))
        # verify boxes
        __UpperCamelCase :str = torch.Size([6, 4])
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowercase)
        __UpperCamelCase :Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowercase , atol=1E-3))
        # verify image_id
        __UpperCamelCase :List[Any] = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowercase))
        # verify is_crowd
        __UpperCamelCase :Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowercase))
        # verify class_labels
        __UpperCamelCase :Tuple = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowercase))
        # verify orig_size
        __UpperCamelCase :Union[str, Any] = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowercase))
        # verify size
        __UpperCamelCase :Dict = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowercase))
    @slow
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """COCO panoptic annotations (with masks) are encoded as expected (slow)."""
        # prepare image, target and masks_path
        __UpperCamelCase :Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
            __UpperCamelCase :str = json.loads(f.read())
        __UpperCamelCase :Dict = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
        __UpperCamelCase :Tuple = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
        # encode them
        __UpperCamelCase :Tuple = ConditionalDetrImageProcessor(format='''coco_panoptic''')
        __UpperCamelCase :Union[str, Any] = image_processing(images=__lowercase , annotations=__lowercase , masks_path=__lowercase , return_tensors='''pt''')
        # verify pixel values
        __UpperCamelCase :int = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['''pixel_values'''].shape , __lowercase)
        __UpperCamelCase :Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowercase , atol=1E-4))
        # verify area
        __UpperCamelCase :Optional[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowercase))
        # verify boxes
        __UpperCamelCase :Any = torch.Size([6, 4])
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowercase)
        __UpperCamelCase :Dict = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowercase , atol=1E-3))
        # verify image_id
        __UpperCamelCase :List[Any] = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowercase))
        # verify is_crowd
        __UpperCamelCase :Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowercase))
        # verify class_labels
        __UpperCamelCase :List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowercase))
        # verify masks
        __UpperCamelCase :Dict = 822_873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowercase)
        # verify orig_size
        __UpperCamelCase :str = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowercase))
        # verify size
        __UpperCamelCase :Any = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowercase))
| 43
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item, main_target):
    """Score ``item`` against ``main_target``: count of positions with matching genes.

    Args:
        item: candidate string (same length as ``main_target``).
        main_target: the target string evolution should converge to.

    Returns:
        Tuple of the evaluated string and its score as a float.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1, parent_2):
    """Slice two equal-length parents at a random cut point and swap their tails.

    Returns:
        Tuple of the two children produced by the exchange.
    """
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child, genes):
    """With probability ``MUTATION_PROBABILITY``, replace one random gene of ``child``.

    Args:
        child: the candidate string to (possibly) mutate.
        genes: pool of characters a mutated position may take.

    Returns:
        The (possibly mutated) string.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # pick a random position (the -1 keeps the index in range)
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    """Breed children from one scored parent against random partners from the population.

    Args:
        parent_1: a ``(string, normalized_score)`` pair; fitter parents get more children.
        population_score: scored population used to draw the second parent from.
        genes: gene pool passed through to ``mutate``.

    Returns:
        List of newly produced (crossed-over and mutated) child strings.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    # Cap the number of children at 10 to keep the population bounded.
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target, genes, debug=True):
    """Evolve random strings toward ``target`` via selection, crossover and mutation.

    Args:
        target: the string evolution should converge to.
        genes: list of characters individuals may be built from.
        debug: when True, print progress every 10 generations.

    Returns:
        Tuple of (generation reached, total individuals evaluated, final string).

    Raises:
        ValueError: if N_POPULATION is not bigger than N_SELECTED, or if ``target``
            contains characters missing from ``genes``.
    """
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        # but with a simple fitness function like this it would probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""")
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo: evolve a sentence from the full gene alphabet and report the stats.
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 43
| 1
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """A SentencePiece-style Unigram tokenizer built on the ``tokenizers`` library.

    Wires up NMT/NFKC/lowercase normalization, metaspace + digit + punctuation
    pre-tokenization, a metaspace decoder and an EOS-appending post-processor,
    and exposes training from files or from an iterator.

    NOTE(review): identifiers look machine-mangled — ``__init__`` repeats the
    parameter name ``__lowercase`` (a SyntaxError) while the body reads the real
    names (``pad_token``, ``eos_token``, ``unk_token``, ``replacement``,
    ``add_prefix_space``). Confirm against the original tokenizers example.
    """
    def __init__( self , __lowercase = "▁" , __lowercase = True , __lowercase = "<unk>" , __lowercase = "</s>" , __lowercase = "<pad>" , ) -> Tuple:
        # Fixed ids for the special tokens: pad=0, eos=1, unk=2.
        __UpperCamelCase :List[Any] = {
            '''pad''': {'''id''': 0, '''token''': pad_token},
            '''eos''': {'''id''': 1, '''token''': eos_token},
            '''unk''': {'''id''': 2, '''token''': unk_token},
        }
        # Flat list of special-token strings, ordered by their ids.
        __UpperCamelCase :int = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            __UpperCamelCase :int = token_dict['''token''']
        __UpperCamelCase :List[str] = Tokenizer(Unigram())
        # Normalization: NMT + NFKC, collapse runs of spaces, lowercase everything.
        __UpperCamelCase :List[Any] = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(''' {2,}''') , ''' '''),
                normalizers.Lowercase(),
            ])
        # Pre-tokenization: metaspace word marking, split digits individually, split punctuation.
        __UpperCamelCase :str = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=__lowercase , add_prefix_space=__lowercase),
                pre_tokenizers.Digits(individual_digits=__lowercase),
                pre_tokenizers.Punctuation(),
            ])
        __UpperCamelCase :List[Any] = decoders.Metaspace(replacement=__lowercase , add_prefix_space=__lowercase)
        # Post-processing: append EOS to every encoded sequence.
        __UpperCamelCase :Union[str, Any] = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
        __UpperCamelCase :Dict = {
            '''model''': '''SentencePieceUnigram''',
            '''replacement''': replacement,
            '''add_prefix_space''': add_prefix_space,
        }
        super().__init__(__lowercase , __lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 8_000 , __lowercase = True , ) -> List[Any]:
        """Train the Unigram model from one or more text files, then register the unk id."""
        __UpperCamelCase :List[str] = trainers.UnigramTrainer(
            vocab_size=__lowercase , special_tokens=self.special_tokens_list , show_progress=__lowercase , )
        # accept a single path as well as a list of paths
        if isinstance(__lowercase , __lowercase):
            __UpperCamelCase :Union[str, Any] = [files]
        self._tokenizer.train(__lowercase , trainer=__lowercase)
        self.add_unk_id()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 8_000 , __lowercase = True , ) -> Union[str, Any]:
        """Train the Unigram model from an iterator of texts, then register the unk id."""
        __UpperCamelCase :Union[str, Any] = trainers.UnigramTrainer(
            vocab_size=__lowercase , special_tokens=self.special_tokens_list , show_progress=__lowercase , )
        self._tokenizer.train_from_iterator(__lowercase , trainer=__lowercase)
        self.add_unk_id()
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        """Patch the serialized tokenizer so the model's unk_id points at our <unk> token."""
        __UpperCamelCase :List[Any] = json.loads(self._tokenizer.to_str())
        __UpperCamelCase :Tuple = self.special_tokens['''unk''']['''id''']
        __UpperCamelCase :str = Tokenizer.from_str(json.dumps(__lowercase))
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the Accelerate GLUE/MRPC example below.
# NOTE(review): both assignments target the same (mangled) name, so the first
# value (16) is immediately overwritten by 32 — presumably these were two
# distinct constants (e.g. a max GPU batch size and an eval batch size);
# confirm against the original accelerate example script.
__lowercase = 16
__lowercase = 32
def lowerCamelCase(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval DataLoaders for GLUE MRPC, tokenized for ``model_name``.

    Args:
        accelerator: the ``accelerate.Accelerator`` driving training; only its
            ``distributed_type`` is consulted (TPU needs fixed-length padding).
        batch_size: per-device batch size for both loaders.
        model_name: checkpoint whose tokenizer is used.

    Returns:
        Tuple of (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Fine-tune a sequence classifier on MRPC and record accuracy per epoch.

    Args:
        config: dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
        args: parsed CLI namespace (model_name_or_path, output_dir,
            performance_lower_bound).

    Side effects:
        Writes per-epoch accuracies to <output_dir>/all_results.json on the main
        process; raises AssertionError if performance_lower_bound is set and not met.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer: use the real AdamW unless the DeepSpeed config already
    # declares an optimizer, in which case a DummyOptim placeholder is required.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler: same real-vs-dummy split as the optimizer above.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    # Trim the padding duplicates added to make the last batch divisible.
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch the MRPC fine-tuning run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    # Fixed hyper-parameters except for the epoch count, which is CLI-controlled.
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 43
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the mangled source bound this to a throwaway name while the
# rest of the file reads `logger`, which raised NameError.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    for old, new in [
        ("self_attn.out_proj", "self_attn.out_proj"),
        ("linear1", "fc1"),
        ("linear2", "fc2"),
        ("norm1", "self_attn_layer_norm"),
        ("norm2", "final_layer_norm"),
    ]:
        for param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.encoder.layers.{i}.{old}.{param}", f"encoder.layers.{i}.{new}.{param}")
            )
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    for old, new in [
        ("self_attn.out_proj", "self_attn.out_proj"),
        ("cross_attn.out_proj", "encoder_attn.out_proj"),
        ("linear1", "fc1"),
        ("linear2", "fc2"),
        ("norm1", "self_attn_layer_norm"),
        ("norm2", "encoder_attn_layer_norm"),
        ("norm3", "final_layer_norm"),
    ]:
        for param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.decoder.layers.{i}.{old}.{param}", f"decoder.layers.{i}.{new}.{param}")
            )
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    # (all weights first, then all biases, matching the original listing order).
    # ca_qpos_proj is intentionally excluded: it only exists on decoder layer 0 and is
    # handled by the explicit entries appended below.
    proj_names = [
        "sa_qcontent_proj",
        "sa_kcontent_proj",
        "sa_qpos_proj",
        "sa_kpos_proj",
        "sa_v_proj",
        "ca_qcontent_proj",
        "ca_kcontent_proj",
        "ca_kpos_proj",
        "ca_v_proj",
        "ca_qpos_sine_proj",
    ]
    for param in ("weight", "bias"):
        for name in proj_names:
            rename_keys.append(
                (f"transformer.decoder.layers.{i}.{name}.{param}", f"decoder.layers.{i}.{name}.{param}")
            )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place.

    Raises:
        KeyError: if `old` is not present in `state_dict`.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys renamed to the HF layout.

    Keys containing 'backbone.0.body' are rewritten to
    'backbone.conv_encoder.model'; all other keys are copied unchanged.
    Insertion order of `state_dict` is preserved.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused encoder self-attention in_proj weights into q/k/v entries, in place.

    PyTorch's MultiHeadAttention stores query/key/value as one fused matrix + bias;
    the HF model expects separate q_proj/k_proj/v_proj parameters. The fused tensors
    are popped and the three 256-row slices written back under the new keys.
    (The hidden size is hard-coded to 256, matching conditional DETR's d_model.)
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original Conditional DETR weights into the HF structure.

    Loads the original checkpoint from torch.hub, renames/splits all parameters,
    verifies logits/boxes (and masks for panoptic) against the original model,
    pushes the converted model to the hub and saves it to pytorch_dump_folder_path.
    """
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # base-model weight: re-root it under the segmentation head's base attribute
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                # these already live at the right location on the segmentation head
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 43
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger; the mangled source bound this to a throwaway name while
# __init__ below calls `logger.info`, which raised NameError.
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_(PretrainedConfig):
    """
    Configuration for a Deformable DETR model: stores backbone choice, transformer
    dimensions, deformable-attention settings, and Hungarian-matcher / loss
    coefficients. Instantiating with defaults yields a configuration similar to
    SenseTime/deformable-detr.
    """

    model_type = "deformable_detr"
    # PretrainedConfig attribute aliases: generic names -> model-specific fields.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1_024,
        encoder_layers=6,
        encoder_ffn_dim=1_024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # allow plain dicts: look up the concrete config class and build it
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 43
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_(unittest.TestCase):
    """Slow integration test for google/mt5-small: checks the LM loss against a
    reference score computed with the original (MTF) implementation."""

    @slow
    def test_small_integration_test(self):
        # NOTE: the method was name-mangled; it must be named test_* or unittest
        # discovery silently skips it.
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Normalize the per-token loss to the MTF convention: negative total log-prob.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 43
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase_(PipelineTool):
    """Zero-shot text classification tool backed by an NLI model: each candidate
    label is scored as the hypothesis 'This example is {label}' and the label with
    the highest entailment logit is returned."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Find which output index corresponds to "entailment" in the NLI head.
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        # Remember the candidate labels so decode() can map the argmax back to one.
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"""This example is {label}""" for label in labels], return_tensors="pt", padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        # NOTE(review): selects via logits[:, 2] — assumes the entailment logit is at
        # index 2 rather than using self.entailment_id found in setup(); TODO confirm.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 43
| 1
|
def lowerCamelCase(arr, required_sum):
    """Return True if some subset of `arr` sums to `required_sum` (0/1 subset-sum DP).

    Args:
        arr: sequence of non-negative integers.
        required_sum: target sum (non-negative).

    Returns:
        bool: whether a subset with the exact target sum exists. The empty subset
        sums to 0, so required_sum == 0 always yields True.
    """
    arr_len = len(arr)
    # subset[i][j] == True iff some subset of arr[:i] sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # element too large for this target: inherit without it
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the element or take it and solve for the remainder
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``StableUnCLIPImgaImgPipeline``.

    Builds a pipeline out of minimal dummy components and checks a forward
    pass plus the shared tester-mixin behaviors (attention slicing, batched
    inference, xformers).

    NOTE(review): throughout this class assignments bind the single name
    ``__UpperCamelCase`` while later lines read descriptive names such as
    ``embedder_hidden_size``, ``feature_extractor`` or ``sd_pipe`` that are
    never bound — local names appear to have been mechanically mangled and
    must be restored before these tests can run.
    """
    # Pipeline class under test and the parameter sets consumed by the mixins.
    a__ : int = StableUnCLIPImgaImgPipeline
    a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a__ : Optional[Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    a__ : int = frozenset([] )
    def UpperCamelCase__ ( self) -> Tuple:
        # Build the dict of tiny dummy components the pipeline is assembled from.
        __UpperCamelCase :Tuple = 32
        __UpperCamelCase :Optional[int] = embedder_hidden_size
        # image encoding components
        __UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
        # regular denoising components
        torch.manual_seed(0)
        __UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
        __UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        __UpperCamelCase :Dict = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
        torch.manual_seed(0)
        __UpperCamelCase :List[Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Tuple = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
        torch.manual_seed(0)
        __UpperCamelCase :List[str] = AutoencoderKL()
        __UpperCamelCase :Tuple = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def UpperCamelCase__ ( self , __lowercase , __lowercase=0 , __lowercase=True) -> str:
        # Build deterministic call kwargs: a seeded generator plus a 1x3x32x32
        # dummy image, optionally converted to PIL.
        if str(__lowercase).startswith('''mps'''):
            __UpperCamelCase :Union[str, Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase)).to(__lowercase)
        if pil_image:
            # Map [-1, 1] tensor to a [0, 1] HWC numpy image, then to PIL.
            __UpperCamelCase :List[Any] = input_image * 0.5 + 0.5
            __UpperCamelCase :Optional[Any] = input_image.clamp(0 , 1)
            __UpperCamelCase :int = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            __UpperCamelCase :Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Full two-step inference on CPU; compares a 3x3 output slice against
        # hard-coded expected pixel values.
        __UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Tuple = self.get_dummy_components()
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline(**__lowercase)
        __UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
        inputs.update({'''image_embeds''': None})
        __UpperCamelCase :Any = sd_pipe(**__lowercase).images
        __UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def UpperCamelCase__ ( self) -> str:
        # Attention-slicing equivalence check (exact match only on cpu/mps).
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)
    def UpperCamelCase__ ( self) -> List[Any]:
        # Batched-vs-single inference equivalence check.
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=__lowercase)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # xformers attention forward-pass equivalence check (CUDA only).
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableUnCLIPImgaImgPipeline``.

    Downloads real checkpoints and reference images from the Hub, runs full
    img2img inference, and compares against stored reference outputs.

    NOTE(review): as elsewhere in this file, assignments bind
    ``__UpperCamelCase`` while later lines read names (``pipe``, ``output``,
    ``image``, ``mem_bytes``, ...) that are never bound — local names appear
    to have been mangled and must be restored before these tests can run.
    """
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # 2-1-L checkpoint: compare full img2img output against the stored
        # reference numpy image (mean pixel difference).
        __UpperCamelCase :int = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCamelCase :Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Dict = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
        __UpperCamelCase :Dict = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase)
    def UpperCamelCase__ ( self) -> List[str]:
        # 2-1-H checkpoint: same comparison against its own reference output.
        __UpperCamelCase :Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCamelCase :Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Optional[int] = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
        __UpperCamelCase :List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase)
    def UpperCamelCase__ ( self) -> List[str]:
        # Memory test: with slicing + sequential offload enabled, peak CUDA
        # allocation during inference must stay under 7 GB.
        __UpperCamelCase :Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        __UpperCamelCase :Union[str, Any] = pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :Optional[Any] = pipe(
            __lowercase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        __UpperCamelCase :int = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43
| 1
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__lowercase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , ):
    """Export a torch module to ONNX via ``torch.onnx.export``.

    Creates the output directory, then dispatches to the correct ``export``
    signature for the installed torch version (``enable_onnx_checker`` /
    ``use_external_data_format`` were removed in torch >= 1.11).

    NOTE(review): all eight parameters share one name (a SyntaxError) and the
    body reads ``output_path``, which is never bound — the parameter names
    appear to have been mangled. Intended signature is presumably
    (model, model_args, output_path, ordered_input_names, output_names,
    dynamic_axes, opset, use_external_data_format=False); TODO confirm.
    """
    output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
    else:
        export(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
    """Convert a Stable Diffusion checkpoint to an ONNX pipeline.

    Exports each sub-model (text encoder, UNet, VAE encoder/decoder and, if
    present, the safety checker) to its own ``model.onnx``, collates the
    UNet's external tensors into one ``weights.pb``, re-assembles everything
    into an ``OnnxStableDiffusionPipeline``, saves it, and reloads it once to
    verify it is loadable.

    NOTE(review): this function appears mechanically renamed: all four
    parameters share one name (a SyntaxError); assignments bind
    ``__UpperCamelCase`` while later lines read ``pipeline``, ``fpaa``,
    ``text_input``, ``unet_path``, ``safety_checker`` etc.; and it calls
    ``onnx_export``, which is defined above under a different name. Intended
    signature is presumably (model_path, output_path, opset, fp16=False);
    restore the identifiers before running.
    """
    __UpperCamelCase :List[str] = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        __UpperCamelCase :List[Any] = '''cuda'''
    elif fpaa and not torch.cuda.is_available():
        # float16 weights cannot be exported on CPU.
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        __UpperCamelCase :Tuple = '''cpu'''
    __UpperCamelCase :Union[str, Any] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = Path(SCREAMING_SNAKE_CASE )
    # TEXT ENCODER
    __UpperCamelCase :str = pipeline.text_encoder.config.max_position_embeddings
    __UpperCamelCase :int = pipeline.text_encoder.config.hidden_size
    __UpperCamelCase :int = pipeline.tokenizer(
        '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
            '''input_ids''': {0: '''batch''', 1: '''sequence'''},
        } , opset=SCREAMING_SNAKE_CASE , )
    # Free each sub-model after export to keep peak memory down.
    del pipeline.text_encoder
    # UNET
    __UpperCamelCase :Dict = pipeline.unet.config.in_channels
    __UpperCamelCase :Union[str, Any] = pipeline.unet.config.sample_size
    __UpperCamelCase :Optional[Any] = output_path / '''unet''' / '''model.onnx'''
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
            torch.randn(2 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
            torch.randn(2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=SCREAMING_SNAKE_CASE , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
            '''timestep''': {0: '''batch'''},
            '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
        } , opset=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , )
    __UpperCamelCase :Optional[Any] = str(unet_path.absolute().as_posix() )
    __UpperCamelCase :Union[str, Any] = os.path.dirname(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Any = onnx.load(SCREAMING_SNAKE_CASE )
    # clean up existing tensor files
    shutil.rmtree(SCREAMING_SNAKE_CASE )
    os.mkdir(SCREAMING_SNAKE_CASE )
    # collate external tensor files into one
    onnx.save_model(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , save_as_external_data=SCREAMING_SNAKE_CASE , all_tensors_to_one_file=SCREAMING_SNAKE_CASE , location='''weights.pb''' , convert_attribute=SCREAMING_SNAKE_CASE , )
    del pipeline.unet
    # VAE ENCODER
    __UpperCamelCase :str = pipeline.vae
    __UpperCamelCase :str = vae_encoder.config.in_channels
    __UpperCamelCase :Union[str, Any] = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    __UpperCamelCase :Optional[int] = lambda SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : vae_encoder.encode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0].sample()
    onnx_export(
        SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=SCREAMING_SNAKE_CASE , )
    # VAE DECODER
    __UpperCamelCase :str = pipeline.vae
    __UpperCamelCase :Optional[int] = vae_decoder.config.latent_channels
    __UpperCamelCase :str = vae_decoder.config.out_channels
    # forward only through the decoder part
    __UpperCamelCase :Dict = vae_encoder.decode
    onnx_export(
        SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=SCREAMING_SNAKE_CASE , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        __UpperCamelCase :Any = pipeline.safety_checker
        __UpperCamelCase :Tuple = safety_checker.config.vision_config.num_channels
        __UpperCamelCase :Any = safety_checker.config.vision_config.image_size
        __UpperCamelCase :Tuple = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
                torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
            ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
                '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
                '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
            } , opset=SCREAMING_SNAKE_CASE , )
        del pipeline.safety_checker
        __UpperCamelCase :Optional[int] = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
        __UpperCamelCase :List[Any] = pipeline.feature_extractor
    else:
        __UpperCamelCase :Any = None
        __UpperCamelCase :str = None
    # Re-assemble the exported sub-models into a runnable ONNX pipeline.
    __UpperCamelCase :Tuple = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
    print('''ONNX pipeline saved to''' , SCREAMING_SNAKE_CASE )
    del pipeline
    del onnx_pipeline
    # Sanity check: the saved pipeline must load back on CPU.
    __UpperCamelCase :Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , provider='''CPUExecutionProvider''' )
    print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
    # CLI entry point: convert a `diffusers` checkpoint to ONNX.
    # Fixes vs. the previous revision: the parser and parsed-args bindings
    # went to a throwaway name, so `parser`/`args` were undefined; the flag
    # declared as `--fp16` is stored by argparse as `args.fp16` (the old code
    # read the non-existent `args.fpaa`); and the conversion entry point is
    # the function defined above (named `lowerCamelCase` in this file).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_path''',
        type=str,
        required=True,
        help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
    )
    parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--opset''',
        default=14,
        type=int,
        help='''The version of the ONNX operator set to use.''',
    )
    parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    # Run the conversion (model_path, output_path, opset, fp16).
    lowerCamelCase(args.model_path, args.output_path, args.opset, args.fp16)
| 43
|
import numpy as np
def lowerCamelCase ( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ):
    """Power iteration: dominant eigenpair of a symmetric/Hermitian matrix.

    Repeatedly multiplies ``vector`` by ``input_matrix`` and renormalizes,
    estimating the eigenvalue with the Rayleigh quotient, until the relative
    change drops below ``error_tol`` or ``max_iterations`` is reached.

    Args:
        input_matrix: square numpy array; real symmetric or complex Hermitian.
        vector: initial guess with matching leading dimension.
        error_tol: relative eigenvalue-change threshold for convergence.
        max_iterations: hard cap on the number of iterations.

    Returns:
        tuple: (largest eigenvalue as a real scalar, normalized eigenvector).

    Fixes vs. the previous revision: all four parameters shared one name (a
    SyntaxError) and every intermediate assignment bound a single throwaway
    local, so the iteration state was never updated.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # The Rayleigh quotient of a Hermitian matrix is real up to rounding.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def lowerCamelCase ( ):
    """Self-test for the power-iteration routine.

    Runs power iteration on a small real symmetric matrix and on a Hermitian
    complex matrix, comparing eigenvalue and eigenvector (up to sign) against
    ``numpy.linalg.eigh``.

    Fixes vs. the previous revision: every intermediate assignment bound a
    single throwaway local, so the names read later (``input_matrix``,
    ``vector``, ``eigen_value``, ...) were undefined.

    NOTE(review): ``power_iteration`` is expected to be the power-iteration
    routine defined just above (named ``lowerCamelCase`` in this file) —
    confirm the binding before running.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    # Add a strictly-upper-triangular imaginary part and subtract its
    # transpose so the complex matrix is Hermitian, not just real-symmetric.
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
# Run the doctests and the power-iteration self-test when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `test_power_iteration` is not defined under that name in
    # this file (the self-test above is named `lowerCamelCase`) — verify the
    # binding before running.
    test_power_iteration()
| 43
| 1
|
from string import ascii_lowercase, ascii_uppercase
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Capitalize the first character of a sentence if it is a lowercase
    ASCII letter; every other character is left untouched.

    >>> lowerCamelCase("hello world")
    'Hello world'
    >>> lowerCamelCase("123 hello world")
    '123 hello world'
    >>> lowerCamelCase("")
    ''

    Fixes vs. the previous revision: the translation table was built from
    ``zip`` of the sentence with itself (an identity map, so nothing was
    ever capitalized) instead of mapping ``ascii_lowercase`` onto
    ``ascii_uppercase``, and the body read the unbound name ``sentence``
    rather than the actual parameter.
    """
    if not SCREAMING_SNAKE_CASE:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    # Non-letters (digits, punctuation, already-uppercase) fall through unchanged.
    return lower_to_upper.get(SCREAMING_SNAKE_CASE[0], SCREAMING_SNAKE_CASE[0]) + SCREAMING_SNAKE_CASE[1:]
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 43
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Fast (Rust-backed) tokenizer for BLOOM models.

    Wraps a `tokenizers` backend, keeps `add_prefix_space` in sync with the
    backend's pre-tokenizer, rejects pre-tokenized input unless
    `add_prefix_space=True`, and supports saving the vocabulary and building
    conversational input ids.

    NOTE(review): the base class `UpperCAmelCase_` is not defined in this
    file (presumably `PreTrainedTokenizerFast`), and `__init__` repeats the
    parameter name `__lowercase` (a SyntaxError) while its body reads
    `add_prefix_space`/`pre_tok_state`, which are never bound — the names
    appear to have been mangled; restore before use.
    """
    # Class-level tokenizer metadata consumed by the base class.
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : List[str] = ["""input_ids""", """attention_mask"""]
    a__ : int = None
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        # Rebuild the backend pre-tokenizer if its stored add_prefix_space
        # setting disagrees with the requested one.
        __UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
            __UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
            __UpperCamelCase :str = add_prefix_space
            __UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
        __UpperCamelCase :Tuple = add_prefix_space
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        # Batched encoding; pre-tokenized input requires add_prefix_space=True.
        __UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        # Single-example encoding; same pre-tokenized-input restriction.
        __UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        # Save the backend tokenizer's model files; returns the written paths.
        __UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
        return tuple(__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> List[int]:
        # Flatten a Conversation into input ids, appending EOS after each turn
        # and truncating (from the left) to the model's max length.
        __UpperCamelCase :str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
        if len(__lowercase) > self.model_max_length:
            __UpperCamelCase :Any = input_ids[-self.model_max_length :]
        return input_ids
| 43
| 1
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
'''simple docstring'''
    def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=16 , __lowercase=36 , __lowercase=6 , __lowercase=6 , __lowercase=6 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Optional[Any]:
        """Store the Albert model-tester hyper-parameters on ``self``.

        NOTE(review): every parameter is named ``__lowercase`` (duplicate
        parameter names are a SyntaxError) and the body reads descriptive
        names (``parent``, ``batch_size``, ``vocab_size``, ...) that are
        never bound — the parameter names appear to have been mangled;
        restore them before use.
        """
        __UpperCamelCase :str = parent
        __UpperCamelCase :List[str] = batch_size
        __UpperCamelCase :List[Any] = seq_length
        __UpperCamelCase :Optional[Any] = is_training
        __UpperCamelCase :List[Any] = use_input_mask
        __UpperCamelCase :str = use_token_type_ids
        __UpperCamelCase :Dict = use_labels
        __UpperCamelCase :List[str] = vocab_size
        __UpperCamelCase :Any = embedding_size
        __UpperCamelCase :Union[str, Any] = hidden_size
        __UpperCamelCase :Tuple = num_hidden_layers
        __UpperCamelCase :List[Any] = num_hidden_groups
        __UpperCamelCase :Optional[Any] = num_attention_heads
        __UpperCamelCase :Tuple = intermediate_size
        __UpperCamelCase :Any = hidden_act
        __UpperCamelCase :Optional[Any] = hidden_dropout_prob
        __UpperCamelCase :Any = attention_probs_dropout_prob
        __UpperCamelCase :List[Any] = max_position_embeddings
        __UpperCamelCase :str = type_vocab_size
        __UpperCamelCase :Any = type_sequence_label_size
        __UpperCamelCase :int = initializer_range
        __UpperCamelCase :int = num_labels
        __UpperCamelCase :Tuple = num_choices
        __UpperCamelCase :str = scope
    def UpperCamelCase__ ( self) -> Dict:
        # Build random input ids, optional masks/token-type ids, optional label
        # tensors, and the model config for a test forward pass.
        # NOTE(review): assignments bind `__UpperCamelCase` while the return reads
        # `config`/`input_ids`/... which are never bound — names look mangled.
        __UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        __UpperCamelCase :int = None
        if self.use_input_mask:
            __UpperCamelCase :str = random_attention_mask([self.batch_size, self.seq_length])
        __UpperCamelCase :Dict = None
        if self.use_token_type_ids:
            __UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        __UpperCamelCase :Optional[Any] = None
        __UpperCamelCase :Dict = None
        __UpperCamelCase :List[Any] = None
        if self.use_labels:
            __UpperCamelCase :str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            __UpperCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            __UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
        __UpperCamelCase :Tuple = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self) -> List[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
        # Instantiate AlbertModel, run it with/without masks and token types,
        # and check the output shapes.
        # NOTE(review): parameters all share the name `__lowercase` (a SyntaxError)
        # and the body reads `model`/`result`, which are never bound — mangled names.
        __UpperCamelCase :Optional[int] = AlbertModel(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase)
        __UpperCamelCase :List[Any] = model(__lowercase , token_type_ids=__lowercase)
        __UpperCamelCase :int = model(__lowercase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
        # AlbertForPreTraining forward with MLM + sentence-order labels; checks
        # prediction and SOP logit shapes.
        # NOTE(review): duplicated `__lowercase` parameters (SyntaxError) and
        # unbound reads of `model`/`result`/`config` — mangled names.
        __UpperCamelCase :int = AlbertForPreTraining(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :List[str] = model(
            __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , sentence_order_label=__lowercase , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
        # AlbertForMaskedLM forward with labels; checks logits shape.
        # NOTE(review): duplicated `__lowercase` parameters (SyntaxError) and
        # unbound reads of `model`/`result` — mangled names.
        __UpperCamelCase :Dict = AlbertForMaskedLM(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :List[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
        # AlbertForQuestionAnswering forward with start/end positions; checks
        # start/end logit shapes.
        # NOTE(review): duplicated `__lowercase` parameters (SyntaxError) and
        # unbound reads of `model`/`result` — mangled names.
        __UpperCamelCase :List[str] = AlbertForQuestionAnswering(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :List[str] = model(
            __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
__UpperCamelCase :List[str] = self.num_labels
__UpperCamelCase :Optional[Any] = AlbertForSequenceClassification(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
__UpperCamelCase :List[Any] = self.num_labels
__UpperCamelCase :Optional[Any] = AlbertForTokenClassification(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :int = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Optional[int] = self.num_choices
__UpperCamelCase :Optional[Any] = AlbertForMultipleChoice(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :str = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Optional[int] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[Any] = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :Optional[Any] = config_and_inputs
__UpperCamelCase :Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-class test suite for ALBERT.

    NOTE(review): the two mixin base names were mangled in the source; restored to the
    conventional transformers test bases -- confirm against the file's imports.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pretraining heads when the mixin asks for labelled inputs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # presumably gated on the pretraining mapping -- verify against the file's imports
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run albert-base-v2 on a fixed input and pin known output values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from a known-good run of this checkpoint.
        expected_slice = torch.tensor(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 43
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    """Configuration for the CTRL model.

    The mangled original bound every constructor argument to a throwaway local instead of
    setting it on `self`, so the config carried no attributes; restored here.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1E-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # feed-forward inner dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 43
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration for Wav2Vec2 models.

    Parameter names restored from the upstream transformers `Wav2Vec2Config`; the
    mangled original both duplicated every parameter name (a SyntaxError) and bound
    values to locals instead of `self` attributes.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # All three conv specs must describe the same number of feature-extractor layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 43
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for TextToVideoSDPipeline.

    NOTE(review): the mixin base name was mangled; restored to PipelineTesterMixin,
    which is imported at the top of this file.
    """

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build a minimal set of randomly-initialized pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-0_5,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs; mps cannot seed a device generator, so seed globally there."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3E-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1E-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow integration tests against the released damo-vilab text-to-video checkpoint."""

    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
| 43
| 1
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase = 16
__lowercase = 32
def bamb(x):
    """Convert a byte count to whole mebibytes (B -> MiB), truncating toward zero.

    Name restored to match the call sites (`bamb(...)`) used throughout this script.
    """
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that records CUDA memory deltas across its body.

    After exit: `used` = end - begin and `peaked` = peak - begin, both in MiB.
    The mangled original never assigned these `self` attributes, so the training
    loop's reads of `tracemalloc.begin/used/peaked` would have raised.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build GLUE/MRPC train and eval DataLoaders tokenized with `model_name`'s tokenizer.

    The `accelerator` is only consulted inside `collate_fn` to choose TPU-friendly padding.
    Returns (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train for `config['num_epochs']` epochs while tracking peak CUDA memory per epoch.

    Writes the per-epoch peak figures to `<output_dir>/peak_memory_utilization.json` on the
    main process, and asserts against `args.peak_memory_upper_bound` when it is set.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer, in which case we hand it a dummy.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (again, DeepSpeed may own it).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    """Parse CLI arguments and launch the memory-tracking training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 43
|
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array of `input_str` in O(n).

    z_result[i] is the length of the longest substring starting at i that is also a
    prefix of the string (z_result[0] is left at 0 by convention). The membership
    check previously delegated to the trivial `go_next` helper is inlined here so
    the function is self-contained.
    """
    z_result = [0 for _ in range(len(input_str))]
    # left/right pointers of the rightmost Z-box found so far
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while (
            i + z_result[i] < len(input_str)
            and input_str[z_result[i]] == input_str[i + z_result[i]]
        ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Return True while the Z-value at index `i` can still be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` via the Z-function.

    Concatenates pattern + input_str; any position whose Z-value reaches
    `len(pattern)` marks a match (note: with no separator character, matches that
    begin inside the prepended pattern are counted as well).
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 43
| 1
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which generation streamers should inherit.

    The mangled original gave both methods the same name, so the second definition
    silently replaced the first; restored to the put/end interface the subclasses
    below override.
    """

    def put(self, value):
        """Called by generation code to push new token ids to the streamer."""
        raise NotImplementedError()

    def end(self):
        """Called by generation code to signal that generation has finished."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Streamer that decodes incoming token ids and prints complete words to stdout.

    The mangled original never set its `self` attributes and referenced undefined
    locals (`text`, `printable_text`); restored here. Holds back partially decoded
    text until a word boundary, newline, or CJK character makes it safe to print.
    """

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []        # token ids not yet flushed
        self.print_len = 0           # chars of the decoded cache already printed
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive a batch-size-1 tensor of token ids, decode, and print safe text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and reset for the next prompt."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Print finalized text; subclasses override this to redirect the stream."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if `cp` (a Unicode code point) lies in a CJK ideograph block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4_E00 and cp <= 0x9_FFF)
            or (cp >= 0x3_400 and cp <= 0x4_DBF)  #
            or (cp >= 0x20_000 and cp <= 0x2A_6DF)  #
            or (cp >= 0x2A_700 and cp <= 0x2B_73F)  #
            or (cp >= 0x2B_740 and cp <= 0x2B_81F)  #
            or (cp >= 0x2B_820 and cp <= 0x2C_EAF)  #
            or (cp >= 0xF_900 and cp <= 0xF_AFF)
            or (cp >= 0x2F_800 and cp <= 0x2F_A1F)  #
        ):  #
            return True

        return False
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """
    Streamer that stores decoded text in a queue so a consumer thread can iterate
    over it (the ``transformers.TextIteratorStreamer`` contract).

    Restored from the mangled original: ``__init__`` declared duplicate parameter
    names (a SyntaxError), the queue/timeout attributes were never bound, and the
    iterator protocol was broken because ``__next__`` had been renamed.

    Args:
        tokenizer: tokenizer used to decode incoming token ids.
        skip_prompt: when True, prompt tokens are not streamed.
        timeout: queue put/get timeout in seconds (None blocks forever).
        decode_kwargs: extra keyword arguments forwarded to ``tokenizer.decode``.
    """

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None  # sentinel placed on the queue at end of stream
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put the new text in the queue; at stream end, also put the stop sentinel."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 43
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Module-level logger and chunk length, restored from mangled `__lowercase`
# placeholders: `logger` is used at the end of the generation loop below and
# TARGET_FEATURE_LENGTH sizes the per-segment spectrogram buffers.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class lowerCamelCase_ ( DiffusionPipeline ):
    """
    Pipeline for MIDI-to-audio generation in the style of diffusers'
    ``SpectrogramDiffusionPipeline``: encodes note tokens, denoises a mel
    spectrogram chunk-by-chunk with a diffusion loop, and optionally vocodes
    the result with MelGAN.

    Restored from a mangled original in which attribute/local assignments were
    bound to placeholder names (leaving ``self.min_value``, ``min_out``,
    ``full_pred_mel`` etc. undefined), all methods shared one mangled name, and
    the base-class placeholder ``UpperCAmelCase_`` was undefined —
    ``DiffusionPipeline`` (imported above) supplies ``register_modules``,
    ``progress_bar`` and ``device``.
    """

    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly rescale features from [self.min_value, self.max_value] to `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map `outputs` from `input_range` back to the feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders; token positions equal to 0 are treated as padding."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Predict denoising logits for `input_tokens` at diffusion time `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio (or a mel spectrogram) from a list of note-token chunks.

        Raises:
            ValueError: if ``callback_steps`` is not a positive integer, or if a
                numpy output is requested without ONNX / the melgan component.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 43
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Return {started_at, completed_at, duration} (duration in minutes) for one GitHub Actions job dict.

    Restored from a mangled version whose locals (`start`, `end`, `start_datetime`,
    `job_info`, ...) were never bound, so every call raised NameError; the name is
    restored to match the call sites in `get_job_time` below.
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch all job timings for one GitHub Actions workflow run.

    Restored from a mangled version that declared the same parameter name twice
    (a SyntaxError) and never bound `headers`, `url`, `result` or `job_time`;
    the name matches the `__main__` call site below.

    Args:
        workflow_run_id: id of the workflow run to inspect.
        token: optional GitHub token for authenticated requests.

    Returns:
        dict mapping job name -> job-info dict; empty dict on any error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # First page already returned up to 100 jobs; page through the remainder.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        # Best-effort: report and return an empty mapping rather than crash the caller.
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    # CLI entry point: print per-job durations for one workflow run,
    # sorted from longest to shortest. Restored `parser`/`args`/`job_time`
    # bindings that the mangled version had lost.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 43
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> transformers Hubert parameter path
# (referenced as MAPPING in recursively_load_weights below; restored from a
# mangled `__lowercase` binding).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the sub-module of `hf_pointer` addressed by the dotted path `key`.

    Restored from a mangled version with five identically-named parameters (a
    SyntaxError) and unbound locals; the name matches the call site in
    `recursively_load_weights`.

    Args:
        hf_pointer: root transformers module to write into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: tensor to copy.
        full_name: original fairseq parameter name (for logging/asserts).
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        # No specific weight type: the pointer itself is the parameter.
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every fairseq parameter into the transformers Hubert model.

    Restored from a mangled version with duplicate parameter names (a SyntaxError)
    and unbound locals (`unused_weights`, `feature_extractor`, `is_used`, ...).

    Conv feature-extractor weights go through `load_conv_layer`; everything else
    is matched against MAPPING and written with `set_recursively`. Unmatched
    parameters are collected and reported with a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned checkpoints nest the encoder under a "hubert." prefix
                # (except for the lm_head, which stays top-level).
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor parameter into the HF feature extractor.

    Restored from a mangled version with duplicate parameter names (a SyntaxError)
    and unbound locals. Also fixes a latent bug in the assert messages, which
    indexed `feature_extractor[layer_id]` (a TypeError when the assert fired)
    instead of `feature_extractor.conv_layers[layer_id]`.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>...." where
    type_id 0 is the conv itself and type_id 2 is its layer norm.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq Hubert checkpoint to the transformers format.

    Restored from a mangled version with duplicate parameter names (a SyntaxError)
    and unbound locals (`config`, `target_dict`, `hf_wav2vec`, ...); the name
    matches the `__main__` call site below.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF config.json to use instead of defaults.
        dict_path: fairseq dictionary (only used for fine-tuned CTC models).
        is_finetuned: whether the checkpoint is a fine-tuned (CTC) model.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors were trained with an attention mask.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the conversion script; restored `parser`/`args`
    # bindings that the mangled version had lost.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 43
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :int = tempfile.mkdtemp()
# fmt: off
__UpperCamelCase :int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__UpperCamelCase :Tuple = dict(zip(__lowercase , range(len(__lowercase))))
__UpperCamelCase :List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__UpperCamelCase :Optional[Any] = {'''unk_token''': '''<unk>'''}
__UpperCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCamelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__lowercase) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__lowercase))
__UpperCamelCase :List[str] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__UpperCamelCase :Any = os.path.join(self.tmpdirname , __lowercase)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(__lowercase , __lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> Any:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> List[str]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self) -> List[str]:
shutil.rmtree(self.tmpdirname)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__UpperCamelCase :int = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = self.get_tokenizer()
__UpperCamelCase :str = self.get_rust_tokenizer()
__UpperCamelCase :Optional[Any] = self.get_image_processor()
__UpperCamelCase :List[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
processor_slow.save_pretrained(self.tmpdirname)
__UpperCamelCase :Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase)
__UpperCamelCase :Union[str, Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
processor_fast.save_pretrained(self.tmpdirname)
__UpperCamelCase :Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , __lowercase)
self.assertIsInstance(processor_fast.tokenizer , __lowercase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , __lowercase)
self.assertIsInstance(processor_fast.image_processor , __lowercase)
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCamelCase :int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
__UpperCamelCase :List[Any] = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0)
__UpperCamelCase :Optional[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[int] = self.get_image_processor()
__UpperCamelCase :Optional[Any] = self.get_tokenizer()
__UpperCamelCase :Dict = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Dict = self.prepare_image_inputs()
__UpperCamelCase :Union[str, Any] = image_processor(__lowercase , return_tensors='''np''')
__UpperCamelCase :str = processor(images=__lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Tuple = self.get_image_processor()
__UpperCamelCase :Dict = self.get_tokenizer()
__UpperCamelCase :str = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Tuple = '''lower newer'''
__UpperCamelCase :str = processor(text=__lowercase)
__UpperCamelCase :Optional[Any] = tokenizer(__lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :str = self.get_image_processor()
__UpperCamelCase :Tuple = self.get_tokenizer()
__UpperCamelCase :Optional[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Optional[int] = '''lower newer'''
__UpperCamelCase :str = self.prepare_image_inputs()
__UpperCamelCase :Any = processor(text=__lowercase , images=__lowercase)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__lowercase):
processor()
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :str = self.get_image_processor()
__UpperCamelCase :Tuple = self.get_tokenizer()
__UpperCamelCase :List[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Optional[Any] = self.prepare_image_inputs()
__UpperCamelCase :Optional[Any] = self.prepare_image_inputs()
__UpperCamelCase :List[Any] = processor(images=__lowercase , visual_prompt=__lowercase)
self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''conditional_pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__lowercase):
processor()
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Optional[Any] = self.get_image_processor()
__UpperCamelCase :List[str] = self.get_tokenizer()
__UpperCamelCase :Optional[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase :Optional[Any] = processor.batch_decode(__lowercase)
__UpperCamelCase :Any = tokenizer.batch_decode(__lowercase)
self.assertListEqual(__lowercase , __lowercase)
| 43
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters (restored names: the mangled version bound every constant to the
# same placeholder `__lowercase`, each assignment shadowing the previous one).
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop boxes smaller than this fraction of the output
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """Generate NUMBER_IMAGES mosaic images (4 source images stitched per output)
    and write each image plus its YOLO-format annotation file to OUTPUT_DIR.

    Restored from a mangled version that called the helpers with the undefined
    name `SCREAMING_SNAKE_CASE` and never bound its locals; renamed `main` to
    match the file-wide convention that call sites use the original names.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        # Pick 4 random source images for one mosaic.
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")

        # Convert corner-format boxes back to YOLO center/size format.
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Read all YOLO-format label files in `label_dir` and pair them with images.

    Restored from a mangled version whose locals (`img_paths`, `labels`,
    `obj_lists`, `boxes`, ...) were never bound; the name matches the call
    site in `main`.

    Args:
        label_dir: directory containing "*.txt" files with lines
            "<class> <x_center> <y_center> <width> <height>".
        img_dir: directory containing "<label_name>.jpg" images.

    Returns:
        (img_paths, labels): parallel lists; each label is a list of
        [class_id, xmin, ymin, xmax, ymax] boxes (corner format).
        Label files with no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # Convert center/size format to corner format.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def lowerCamelCase(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Build a 4-image mosaic and remap the YOLO-style annotations onto it.

    Fixes the original signature (duplicate parameter names were a
    SyntaxError), the ``np.uinta`` typo (no such dtype; ``np.uint8`` intended),
    and the collapsed quadrant slice assignments.

    Args:
        all_img_list: list of image file paths.
        all_annos: per-image lists of ``[class_id, xmin, ymin, xmax, ymax]``
            boxes with coordinates normalised to [0, 1].
        idxs: four indices selecting the quadrant images in order
            (top-left, top-right, bottom-left, bottom-right).
        output_size: ``(height, width)`` of the mosaic canvas.
        scale_range: ``(lo, hi)`` range from which the random split-point
            scales are drawn.
        filter_scale: drop remapped boxes whose width or height is not
            strictly greater than this value (0.0 keeps everything).

    Returns:
        ``(output_img, new_anno, path)`` — the mosaic image, the remapped
        annotations, and the path of the first (top-left) source image.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    # Pixel coordinates of the split point between the four quadrants.
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def lowerCamelCase(number_char):
    """Generate a random string of lowercase letters and digits.

    Fixes the original, whose body referenced ``number_char`` while the
    parameter was named differently (NameError) and whose character pool
    variable was lost.

    Args:
        number_char: desired length of the string; must be greater than 1.

    Returns:
        A random string of length ``number_char`` drawn from ``[a-z0-9]``.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 43
| 1
|
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 1_000_000 ):
    """Project Euler 14: starting number below the limit with the longest Collatz chain.

    Restores the distinct local variables (``largest_number``, ``pre_counter``,
    ``counters``) that the original collapsed into a single name, which left
    ``counters``/``largest_number`` undefined at runtime.

    Args:
        SCREAMING_SNAKE_CASE: exclusive upper bound for starting numbers.

    Returns:
        The starting number below the bound whose Collatz sequence is longest.
    """
    largest_number = 1
    pre_counter = 1
    # counters[n] caches the chain length starting at n (1 has length 1).
    counters = {1: 1}
    for inputa in range(2, SCREAMING_SNAKE_CASE):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # Reuse the cached tail length instead of walking to 1.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 43
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration container for a Wav2Vec2-style speech model.

    Stores architecture hyper-parameters on ``self``: encoder sizes, the
    convolutional feature-extractor layout (``conv_dim``/``conv_stride``/
    ``conv_kernel``), SpecAugment masking, quantizer, CTC, adapter and
    XVector settings — for downstream model classes to read.

    NOTE(review): every positional parameter of ``__init__`` below is named
    ``__lowercase``; duplicate argument names are a SyntaxError in Python,
    and the assignment right-hand sides reference the intended names
    (``hidden_size``, ``vocab_size``, ...) which are therefore unbound.
    This looks like mechanical renaming damage — the original parameter
    names would need restoring for the class to be importable.
    """
    # Model-type identifier (presumably consumed by auto-config machinery).
    a__ : Union[str, Any] = """wav2vec2"""
    def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
        # Forward special-token ids and remaining kwargs to the base config.
        super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
        # Transformer-encoder and feature-extractor hyper-parameters.
        __UpperCamelCase :Any = hidden_size
        __UpperCamelCase :int = feat_extract_norm
        __UpperCamelCase :Tuple = feat_extract_activation
        __UpperCamelCase :Union[str, Any] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :int = list(__lowercase)
        __UpperCamelCase :List[Any] = conv_bias
        __UpperCamelCase :Optional[int] = num_conv_pos_embeddings
        __UpperCamelCase :Dict = num_conv_pos_embedding_groups
        __UpperCamelCase :Any = len(self.conv_dim)
        __UpperCamelCase :List[str] = num_hidden_layers
        __UpperCamelCase :int = intermediate_size
        __UpperCamelCase :str = hidden_act
        __UpperCamelCase :Any = num_attention_heads
        __UpperCamelCase :int = hidden_dropout
        __UpperCamelCase :Tuple = attention_dropout
        __UpperCamelCase :List[str] = activation_dropout
        __UpperCamelCase :Optional[Any] = feat_proj_dropout
        __UpperCamelCase :Any = final_dropout
        __UpperCamelCase :Any = layerdrop
        __UpperCamelCase :str = layer_norm_eps
        __UpperCamelCase :Optional[Any] = initializer_range
        __UpperCamelCase :List[str] = vocab_size
        __UpperCamelCase :str = do_stable_layer_norm
        __UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
        # The three conv-layer descriptions must agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __UpperCamelCase :List[Any] = apply_spec_augment
        __UpperCamelCase :Tuple = mask_time_prob
        __UpperCamelCase :int = mask_time_length
        __UpperCamelCase :Dict = mask_time_min_masks
        __UpperCamelCase :str = mask_feature_prob
        __UpperCamelCase :List[str] = mask_feature_length
        __UpperCamelCase :Union[str, Any] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        __UpperCamelCase :Optional[Any] = num_codevectors_per_group
        __UpperCamelCase :List[Any] = num_codevector_groups
        __UpperCamelCase :Tuple = contrastive_logits_temperature
        __UpperCamelCase :Optional[int] = feat_quantizer_dropout
        __UpperCamelCase :Optional[int] = num_negatives
        __UpperCamelCase :List[Any] = codevector_dim
        __UpperCamelCase :str = proj_codevector_dim
        __UpperCamelCase :List[str] = diversity_loss_weight
        # ctc loss
        __UpperCamelCase :Tuple = ctc_loss_reduction
        __UpperCamelCase :Tuple = ctc_zero_infinity
        # adapter
        __UpperCamelCase :List[str] = add_adapter
        __UpperCamelCase :Tuple = adapter_kernel_size
        __UpperCamelCase :str = adapter_stride
        __UpperCamelCase :Tuple = num_adapter_layers
        __UpperCamelCase :Tuple = output_hidden_size or hidden_size
        __UpperCamelCase :Optional[Any] = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __UpperCamelCase :Optional[Any] = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        __UpperCamelCase :Optional[int] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :str = xvector_output_dim
    @property
    def UpperCamelCase__ ( self) -> List[str]:
        # Product of all conv strides — the feature extractor's overall
        # input-to-frame reduction factor.
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 43
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Task template for a language-modelling dataset: one string column
    (default name ``"text"``) as input and no label features.

    NOTE(review): all four fields are declared under the same name ``a__``,
    so only the last declaration survives, and ``frozen=UpperCAmelCase_``
    passes a class where a boolean is expected — both look like mechanical
    renaming damage (presumably ``frozen=True`` and distinct field names
    such as ``task`` / ``input_schema`` / ``label_schema`` / ``text_column``
    originally).
    """
    a__ : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    a__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    a__ : ClassVar[Features] = Features({} )
    a__ : str = "text"
    @property
    def UpperCamelCase__ ( self) -> Dict[str, str]:
        # Maps the configured text column onto the canonical "text" feature.
        # NOTE(review): ``self.text_column`` is not defined on this class as
        # written (see field-name note above) — confirm against the original.
        return {self.text_column: "text"}
| 43
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Image processor that optionally resizes images down to the nearest
    multiple of ``size_divisor`` and rescales pixel values by 1/255,
    returning a ``BatchFeature`` with a ``pixel_values`` entry.

    NOTE(review): the ``def`` signatures below repeat the parameter name
    ``__lowercase``, which is a SyntaxError, while the bodies reference the
    intended names (``do_resize``, ``size_divisor``, ...). The original
    parameter names would need restoring for this class to be importable.
    """
    # Keys this processor produces in its output batch.
    a__ : Optional[Any] = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
        # Store per-instance defaults; per-call overrides happen in preprocess.
        __UpperCamelCase :Optional[int] = do_resize
        __UpperCamelCase :Any = do_rescale
        __UpperCamelCase :str = size_divisor
        __UpperCamelCase :Dict = resample
        super().__init__(**__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        # Resize so both sides become exact multiples of ``size_divisor``.
        __UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
        # Rounds the height and width down to the closest multiple of size_divisor
        __UpperCamelCase :List[Any] = height // size_divisor * size_divisor
        __UpperCamelCase :List[str] = width // size_divisor * size_divisor
        __UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
        return image
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        # Thin wrapper around the functional ``rescale`` transform.
        return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
        # Resolve per-call overrides against the instance defaults.
        __UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
        __UpperCamelCase :List[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        __UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
        if not valid_images(__lowercase):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        __UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
        if do_resize:
            __UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
        if do_rescale:
            __UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        __UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
        __UpperCamelCase :int = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase)
| 43
| 1
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    """GPT-2-based text decoder driven by prefix embeddings.

    A (possibly projected) prefix embedding is prepended to token embeddings
    and fed through a ``GPTaLMHeadModel``; ``generate_beam`` performs
    beam-search decoding from such a prefix.

    NOTE(review): local variable names were lost to mechanical renaming —
    many statements assign to the placeholder ``__UpperCamelCase`` while the
    positional parameters are all ``__lowercase`` (duplicate argument names
    are a SyntaxError), so reads such as ``prefix_length`` / ``scores`` /
    ``tokens`` below refer to names that are never bound. The structure
    matches the original, but it is not runnable as written.
    """
    # Parameter-name patterns to ignore when loading GPT-2 checkpoints.
    a__ : List[Any] = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
    @register_to_config
    def __init__( self , __lowercase , __lowercase , __lowercase = None , __lowercase = 50_257 , __lowercase = 1_024 , __lowercase = 768 , __lowercase = 12 , __lowercase = 12 , __lowercase = None , __lowercase = "gelu_new" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 1E-5 , __lowercase = 0.02 , __lowercase = True , __lowercase = True , __lowercase = False , __lowercase = False , ) -> Dict:
        super().__init__()
        __UpperCamelCase :Optional[int] = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                f""" `n_embd`: {n_embd} are not equal.""")
        __UpperCamelCase :int = prefix_inner_dim
        __UpperCamelCase :List[Any] = prefix_hidden_dim
        # Optional projections into/out of the prefix hidden space.
        __UpperCamelCase :int = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        __UpperCamelCase :Tuple = (
            nn.Linear(self.prefix_hidden_dim , __lowercase) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        __UpperCamelCase :Union[str, Any] = GPTaConfig(
            vocab_size=__lowercase , n_positions=__lowercase , n_embd=__lowercase , n_layer=__lowercase , n_head=__lowercase , n_inner=__lowercase , activation_function=__lowercase , resid_pdrop=__lowercase , embd_pdrop=__lowercase , attn_pdrop=__lowercase , layer_norm_epsilon=__lowercase , initializer_range=__lowercase , scale_attn_weights=__lowercase , use_cache=__lowercase , scale_attn_by_inverse_layer_idx=__lowercase , reorder_and_upcast_attn=__lowercase , )
        __UpperCamelCase :List[str] = GPTaLMHeadModel(__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , __lowercase = None , ) -> Any:
        # Forward pass: embed tokens, prepend decoded prefix embeddings, run LM.
        __UpperCamelCase :Any = self.transformer.transformer.wte(__lowercase)
        __UpperCamelCase :Union[str, Any] = self.encode_prefix(__lowercase)
        __UpperCamelCase :List[str] = self.decode_prefix(__lowercase)
        __UpperCamelCase :str = torch.cat((prefix_embeds, embedding_text) , dim=1)
        if labels is not None:
            # Pad labels with dummy tokens covering the prefix positions.
            __UpperCamelCase :List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device)
            __UpperCamelCase :int = torch.cat((dummy_token, input_ids) , dim=1)
        __UpperCamelCase :int = self.transformer(inputs_embeds=__lowercase , labels=__lowercase , attention_mask=__lowercase)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> torch.Tensor:
        # Zero-filled placeholder labels for the prefix positions.
        return torch.zeros(__lowercase , self.prefix_length , dtype=torch.intaa , device=__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> str:
        return self.encode_prefix(__lowercase)
    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[str]:
        # Decode one sequence per prefix feature via beam search.
        __UpperCamelCase :Tuple = torch.split(__lowercase , 1 , dim=0)
        __UpperCamelCase :Tuple = []
        __UpperCamelCase :Union[str, Any] = []
        for feature in features:
            __UpperCamelCase :Optional[Any] = self.decode_prefix(feature.to(__lowercase)) # back to the clip feature
            # Only support beam search for now
            __UpperCamelCase , __UpperCamelCase :List[str] = self.generate_beam(
                input_embeds=__lowercase , device=__lowercase , eos_token_id=__lowercase)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        __UpperCamelCase :Optional[Any] = torch.stack(__lowercase)
        __UpperCamelCase :str = torch.stack(__lowercase)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase = 5 , __lowercase = 67 , __lowercase = 1.0 , __lowercase = None , ) -> Optional[int]:
        # Beam search over the LM starting from prefix embeddings.
        __UpperCamelCase :Optional[Any] = eos_token_id
        __UpperCamelCase :str = None
        __UpperCamelCase :Optional[int] = None
        __UpperCamelCase :List[Any] = torch.ones(__lowercase , device=__lowercase , dtype=torch.int)
        __UpperCamelCase :List[str] = torch.zeros(__lowercase , device=__lowercase , dtype=torch.bool)
        if input_embeds is not None:
            __UpperCamelCase :Dict = input_embeds
        else:
            __UpperCamelCase :int = self.transformer.transformer.wte(__lowercase)
        for i in range(__lowercase):
            __UpperCamelCase :List[str] = self.transformer(inputs_embeds=__lowercase)
            __UpperCamelCase :List[str] = outputs.logits
            __UpperCamelCase :Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            __UpperCamelCase :List[str] = logits.softmax(-1).log()
            if scores is None:
                # First step: seed the beams from the top-k tokens.
                __UpperCamelCase , __UpperCamelCase :Optional[Any] = logits.topk(__lowercase , -1)
                __UpperCamelCase :List[str] = generated.expand(__lowercase , *generated.shape[1:])
                __UpperCamelCase , __UpperCamelCase :Optional[int] = next_tokens.permute(1 , 0), scores.squeeze(0)
                if tokens is None:
                    __UpperCamelCase :str = next_tokens
                else:
                    __UpperCamelCase :List[Any] = tokens.expand(__lowercase , *tokens.shape[1:])
                    __UpperCamelCase :List[str] = torch.cat((tokens, next_tokens) , dim=1)
            else:
                # Later steps: extend every beam, keep the best length-averaged candidates.
                __UpperCamelCase :Dict = -float(np.inf)
                __UpperCamelCase :Tuple = 0
                __UpperCamelCase :List[str] = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                __UpperCamelCase :Optional[Any] = scores_sum / seq_lengths[:, None]
                __UpperCamelCase , __UpperCamelCase :Optional[Any] = scores_sum_average.view(-1).topk(__lowercase , -1)
                __UpperCamelCase :str = next_tokens // scores_sum.shape[1]
                __UpperCamelCase :Tuple = seq_lengths[next_tokens_source]
                __UpperCamelCase :str = next_tokens % scores_sum.shape[1]
                __UpperCamelCase :int = next_tokens.unsqueeze(1)
                __UpperCamelCase :Any = tokens[next_tokens_source]
                __UpperCamelCase :Optional[int] = torch.cat((tokens, next_tokens) , dim=1)
                __UpperCamelCase :Optional[Any] = generated[next_tokens_source]
                __UpperCamelCase :str = scores_sum_average * seq_lengths
                __UpperCamelCase :Union[str, Any] = is_stopped[next_tokens_source]
            __UpperCamelCase :Optional[int] = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
            __UpperCamelCase :int = torch.cat((generated, next_token_embed) , dim=1)
            __UpperCamelCase :List[str] = is_stopped + next_tokens.eq(__lowercase).squeeze()
            if is_stopped.all():
                break
        # Order finished beams by their length-normalised score, best first.
        __UpperCamelCase :Any = scores / seq_lengths
        __UpperCamelCase :Optional[int] = scores.argsort(descending=__lowercase)
        # tokens tensors are already padded to max_seq_length
        __UpperCamelCase :int = [tokens[i] for i in order]
        __UpperCamelCase :str = torch.stack(__lowercase , dim=0)
        __UpperCamelCase :List[str] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 43
|
from __future__ import annotations
from PIL import Image
# Define glider example
__lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase(cells):
    """Compute the next generation of a Conway's Game of Life board.

    Fixes the original, whose body referenced ``cells`` while the parameter
    had a different name (NameError on first use).

    Args:
        cells: 2D list of 0/1 ints; 1 marks a live cell.

    Returns:
        A new 2D list of the same shape holding the next generation.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def lowerCamelCase(cells, frames):
    """Render successive Game-of-Life generations as greyscale RGB images.

    Live cells are drawn black (0) and dead cells white (255). Fixes the
    original signature, whose duplicated parameter names were a SyntaxError.

    Args:
        cells: 2D list of 0/1 ints for the starting generation.
        frames: number of generations (images) to render.

    Returns:
        List of PIL ``Image`` objects, one per generation.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        # NOTE(review): x iterates rows and y columns while the canvas is
        # (width, height) — correct only for square boards; confirm intent.
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        # NOTE(review): ``new_generation`` must be the step function defined
        # above in the original module; it is unresolved in this file as-is.
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
__lowercase = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 43
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowercase = TypeVar('''T''')
def lowerCamelCase(position):
    """Return the heap index of the parent of the node at ``position``.

    Fixes the original, whose body referenced ``position`` while the
    parameter had a different name (NameError).
    """
    return (position - 1) // 2
def lowerCamelCase(position):
    """Return the heap index of the left child of the node at ``position``.

    Fixes the original, whose body referenced ``position`` while the
    parameter had a different name (NameError).
    """
    return (2 * position) + 1
def lowerCamelCase(position):
    """Return the heap index of the right child of the node at ``position``.

    Fixes the original, whose body referenced ``position`` while the
    parameter had a different name (NameError).
    """
    return (2 * position) + 2
class lowerCamelCase_ ( Generic[T] ):
    """Array-backed binary min-heap priority queue.

    ``self.heap`` holds ``(element, weight)`` pairs, ``self.position_map``
    tracks each element's current heap index, and ``self.elements`` is the
    queue size.

    NOTE(review): this class is not runnable as written — several tuple
    unpackings assign both values to the placeholder ``__UpperCamelCase``
    and then read names (``elem``, ``weight``, ``position``, ...) that are
    never bound, two ``def``s repeat the parameter name ``__lowercase``
    (SyntaxError), and the helpers ``get_parent_position`` /
    ``get_child_left_position`` / ``get_child_right_position`` are
    unresolved in this file. Mechanical-renaming damage; the structure
    matches a standard min-priority-queue implementation.
    """
    def __init__( self) -> None:
        __UpperCamelCase :list[tuple[T, int]] = []
        __UpperCamelCase :dict[T, int] = {}
        __UpperCamelCase :int = 0
    def __len__( self) -> int:
        return self.elements
    def __repr__( self) -> str:
        return str(self.heap)
    def UpperCamelCase__ ( self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        __UpperCamelCase :Optional[Any] = self.elements
        self.elements += 1
        self._bubble_up(__lowercase)
    def UpperCamelCase__ ( self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1)
        __UpperCamelCase , __UpperCamelCase :Dict = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            __UpperCamelCase , __UpperCamelCase :Optional[int] = self.heap[0]
            self._bubble_down(__lowercase)
        return elem
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> None:
        # Update the weight of the given key
        __UpperCamelCase :List[str] = self.position_map[elem]
        __UpperCamelCase :List[str] = (elem, weight)
        if position > 0:
            __UpperCamelCase :str = get_parent_position(__lowercase)
            __UpperCamelCase , __UpperCamelCase :Tuple = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__lowercase)
            else:
                self._bubble_down(__lowercase)
        else:
            self._bubble_down(__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        __UpperCamelCase :Any = self.position_map[elem]
        if curr_pos == 0:
            return None
        __UpperCamelCase :List[str] = get_parent_position(__lowercase)
        __UpperCamelCase , __UpperCamelCase :List[Any] = self.heap[curr_pos]
        __UpperCamelCase , __UpperCamelCase :int = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(__lowercase , __lowercase)
            return self._bubble_up(__lowercase)
        return None
    def UpperCamelCase__ ( self , __lowercase) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        __UpperCamelCase :int = self.position_map[elem]
        __UpperCamelCase , __UpperCamelCase :List[Any] = self.heap[curr_pos]
        __UpperCamelCase :int = get_child_left_position(__lowercase)
        __UpperCamelCase :Optional[Any] = get_child_right_position(__lowercase)
        if child_left_position < self.elements and child_right_position < self.elements:
            __UpperCamelCase , __UpperCamelCase :Optional[Any] = self.heap[child_left_position]
            __UpperCamelCase , __UpperCamelCase :Optional[int] = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__lowercase , __lowercase)
                return self._bubble_down(__lowercase)
        if child_left_position < self.elements:
            __UpperCamelCase , __UpperCamelCase :int = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__lowercase , __lowercase)
                return self._bubble_down(__lowercase)
        else:
            return None
        if child_right_position < self.elements:
            __UpperCamelCase , __UpperCamelCase :Optional[Any] = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__lowercase , __lowercase)
                return self._bubble_down(__lowercase)
        return None
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> None:
        # Swap the nodes at the given positions
        __UpperCamelCase :Union[str, Any] = self.heap[nodea_pos][0]
        __UpperCamelCase :Dict = self.heap[nodea_pos][0]
        __UpperCamelCase , __UpperCamelCase :int = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        __UpperCamelCase :Any = nodea_pos
        __UpperCamelCase :Tuple = nodea_pos
class lowerCamelCase_ ( Generic[T] ):
    """Undirected weighted graph stored as an adjacency map:
    ``connections`` maps node -> {neighbour: weight}; ``nodes`` counts
    distinct nodes.

    NOTE(review): the assignments below target the placeholder name
    ``__UpperCamelCase`` instead of ``self.connections`` /
    ``self.connections[node][other]`` — mechanical-renaming damage; the
    class does not work as written.
    """
    def __init__( self) -> None:
        __UpperCamelCase :dict[T, dict[T, int]] = {}
        __UpperCamelCase :int = 0
    def __repr__( self) -> str:
        return str(self.connections)
    def __len__( self) -> int:
        return self.nodes
    def UpperCamelCase__ ( self , __lowercase) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            __UpperCamelCase :List[str] = {}
            self.nodes += 1
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(__lowercase)
        self.add_node(__lowercase)
        # Symmetric weight entries (undirected edge).
        __UpperCamelCase :Dict = weight
        __UpperCamelCase :Union[str, Any] = weight
def lowerCamelCase(graph, ):
    """Prim's minimum-spanning-tree algorithm over an undirected weighted graph.

    Restores the local names (``dist``, ``parent``, ``node``, ...) that the
    original collapsed into a single placeholder, which left them undefined.

    Args:
        graph: graph object exposing ``connections`` as a node ->
            {neighbour: weight} adjacency map (the GraphUndirectedWeighted
            class defined above in the original module).

    Returns:
        ``(dist, parent)`` — per-node connection weight into the tree and
        per-node predecessor (``None`` for the root).

    NOTE(review): ``MinPriorityQueue`` is the priority-queue class defined
    above in the original module; its name is unresolved in this file as-is.
    """
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization: pull an arbitrary root and relax its neighbours
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 43
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCamelCase(key):
    """Flatten indexed parameter-name segments: ``"foo.0"`` becomes ``"foo_0"``.

    Restores the regex-pattern local that the original collapsed into the
    parameter (it passed the key as its own search pattern).

    Args:
        key: dotted parameter name, e.g. ``"layers.0.weight"``.

    Returns:
        The key with every ``name.<digits>`` segment rewritten to
        ``name_<digits>``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def lowerCamelCase(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key to Flax conventions, reshaping if needed.

    Fixes the original signature, whose duplicated parameter names were a
    SyntaxError; the body already referenced these names.

    Args:
        pt_tuple_key: PyTorch parameter name split into a tuple of segments.
        pt_tensor: the parameter tensor (numpy-compatible array).
        random_flax_state_dict: flattened reference Flax parameter dict used
            to decide which rename applies.

    Returns:
        ``(flax_key, tensor)`` — the renamed tuple key and the (possibly
        transposed) tensor.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    # layer-norm bias stored as "scale" on the Flax side
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def lowerCamelCase(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch ``state_dict`` into a nested Flax parameter dict.

    Fixes the original signature, whose duplicated parameter names were a
    SyntaxError.

    Args:
        pt_state_dict: mapping of dotted PyTorch parameter names to tensors
            exposing ``.numpy()``.
        flax_model: Flax model providing ``init_weights`` for the reference
            (randomly initialised) parameter tree.
        init_key: seed for the ``PRNGKey`` used to initialise that tree.

    Returns:
        The converted parameters as a nested (unflattened) dict of ``jnp``
        arrays.

    Raises:
        ValueError: when a converted tensor's shape disagrees with the
            randomly initialised Flax parameter of the same name.

    NOTE(review): ``rename_key`` and ``rename_key_and_reshape_tensor`` are
    the helpers defined above in the original module; their names are
    unresolved in this file as-is.
    """
    # Step 1: convert to numpy so tensors are framework-neutral.
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict
        )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 43
| 1
|
# Lazy import structure for the TimeSformer model, following the standard
# transformers `_LazyModule` pattern: heavy submodules are imported only when
# one of their attributes is first accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it exports.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # The modeling code requires torch; skip it silently when torch is missing.
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Build an ALBERT PyTorch model from a config file, load the TensorFlow
    checkpoint weights into it, and save the resulting ``state_dict``.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        albert_config_file: Path to the JSON config describing the architecture.
        pytorch_dump_path: Destination path for the serialized PyTorch weights.
    """
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from the TF checkpoint into the freshly built PyTorch model.
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save only the weights, not the full module, to keep the dump portable.
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI wrapper: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--albert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained ALBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
| 1
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowercase = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 token-id array of `shape` with values in [0, vocab_size).

    Args:
        shape: Iterable of ints, the output array shape.
        vocab_size: Exclusive upper bound for the sampled ids.
        rng: Optional ``random.Random`` instance for reproducibility.
    """
    if rng is None:
        rng = random.Random()

    # Total number of elements to sample.
    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = [rng.randint(0, vocab_size - 1) for _ in range(total_dims)]
    # Sample flat, then reshape to the requested shape.
    return np.array(values, dtype=jnp.int32).reshape(shape)
def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask of `shape`.

    The last position of every row is forced to 1 so that each batch entry
    attends to at least one token.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    """Generation smoke tests shared by all Flax generative model test classes.

    Subclasses must set `model_tester` (providing
    `prepare_config_and_inputs_for_common`) and `all_generative_model_classes`.
    """

    # Set by the concrete test class.
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        """Build a small (config, input_ids, attention_mask, max_length) tuple."""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        """Greedy generation must agree between a Flax model and its PyTorch twin."""
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            # PT may stop earlier at EOS; compare only the overlapping prefix.
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            # jit-compiled generation must match the eager result.
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            # One output row per (batch element x requested return sequence).
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    """Integration checks for `generate()` argument validation on a tiny model."""

    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')

        encoder_input_str = '''Hello world'''
        input_ids = tokenizer(encoder_input_str, return_tensors='''np''').input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, '''do_samples'''):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, '''foo'''):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids, **fake_model_kwargs)
| 43
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Simulate a quantum full adder on qiskit's Aer simulator.

    Each input may be 0, 1, or 2, where 2 puts the corresponding qubit into
    superposition via a Hadamard gate.

    Returns:
        Measurement counts of the (sum, carry_out) qubits over 1000 shots.

    Raises:
        TypeError: If any input is a string.
        ValueError: If any input is negative, non-integral, or greater than 2.
    """
    # Validate inputs before touching any quantum backend.
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError('''inputs must be integers.''')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')

    # build registers: 3 input qubits + 1 ancilla for the carry, 2 classical bits
    qr = qiskit.QuantumRegister(4, '''qr''')
    cr = qiskit.ClassicalRegister(2, '''cr''')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 43
| 1
|
# Lazy import structure for GPT-NeoX Japanese, following the standard
# transformers `_LazyModule` pattern.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Maps submodule name -> public names it exports.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # The modeling code requires torch; skip it silently when torch is missing.
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
|
import random
def partition(a, left_index, right_index):
    """Lomuto-style partition of a[left_index:right_index] around a[left_index].

    Elements smaller than the pivot are moved before it. Mutates `a` in place.

    Returns:
        The final index of the pivot element.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Move the pivot between the two partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a, left, right):
    """In-place quicksort of a[left:right] using a uniformly random pivot.

    The random pivot makes the worst-case O(n^2) behavior unlikely on
    adversarial (e.g. already-sorted) inputs.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
def main():
    """Read comma-separated integers from stdin, sort them, and print the result."""
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 43
| 1
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowercase = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (left, top, right, bottom) pixel box to the 0-1000
    coordinate range used by LayoutLM-style models.
    """
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on `image` and return (words, normalized_boxes).

    Args:
        image: Input image in any format accepted by `to_pil_image`.
        lang: Optional Tesseract language code.
        tesseract_config: Extra Tesseract CLI flags.

    Returns:
        Tuple of the recognized non-empty words and their bounding boxes
        normalized to the 0-1000 range.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates (set gives O(1) lookups)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """Image processor for LayoutLM-style document models.

    Resizes, rescales, and normalizes document images and optionally runs
    Tesseract OCR to attach words plus 0-1000-normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize `image` to (image - mean) / std per channel."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch; per-call args override the instance defaults.

        Returns a `BatchFeature` with "pixel_values" and, when OCR is enabled,
        "words" and "boxes".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, '''pytesseract''')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
| 43
|
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """Return the denominator d in [numerator, digit] whose fraction
    numerator/d produces the longest run of distinct long-division remainders
    before one repeats (Project Euler problem 26 with the defaults).
    """
    longest_cycle_length = 0
    best_denominator = 1
    for denominator in range(numerator, digit + 1):
        # Set gives O(1) membership; the original list scan was O(n) per step.
        seen_remainders: set[int] = set()
        remainder = numerator
        for _ in range(1, digit + 1):
            if remainder in seen_remainders:
                # The remainders now cycle forever, so no new ones can appear:
                # record the run length and stop early.
                if longest_cycle_length < len(seen_remainders):
                    longest_cycle_length = len(seen_remainders)
                    best_denominator = denominator
                break
            seen_remainders.add(remainder)
            # Next long-division step: bring down a zero, take the remainder.
            remainder = remainder * 10 % denominator
    return best_denominator
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
| 1
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : int = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase__ ( self , __lowercase=0) -> Optional[int]:
__UpperCamelCase :Dict = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowercase))
__UpperCamelCase :List[Any] = np.random.RandomState(__lowercase)
__UpperCamelCase :Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Union[str, Any] = self.get_dummy_inputs()
__UpperCamelCase :Tuple = pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :List[str] = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Any = self.get_dummy_inputs()
__UpperCamelCase :List[Any] = pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :List[Any] = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__lowercase)
# warmup pass to apply optimizations
__UpperCamelCase :str = pipe(**self.get_dummy_inputs())
__UpperCamelCase :Optional[int] = self.get_dummy_inputs()
__UpperCamelCase :Optional[int] = pipe(**__lowercase).images
__UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :Any = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :int = self.get_dummy_inputs()
__UpperCamelCase :int = pipe(**__lowercase).images
__UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :Union[str, Any] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> str:
    # Smoke test: ONNX img2img pipeline with the Euler *ancestral* scheduler.
    # NOTE(review): expected slice is identical to the Euler test above —
    # either intentional (schedulers agree at this step count) or a copy-paste;
    # cannot be confirmed from this file alone.
    __UpperCamelCase :Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
    __UpperCamelCase :Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=__lowercase)
    __UpperCamelCase :int = self.get_dummy_inputs()
    __UpperCamelCase :List[str] = pipe(**__lowercase).images
    __UpperCamelCase :int = image[0, -3:, -3:, -1]
    assert image.shape == (1, 128, 128, 3)
    __UpperCamelCase :Dict = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> Optional[int]:
    # Smoke test: ONNX img2img pipeline with the DPM-Solver multistep scheduler.
    # NOTE(review): `__lowercase` is undefined (mangled) — presumably `None` /
    # the dummy-inputs dict originally. TODO confirm.
    __UpperCamelCase :List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
    __UpperCamelCase :List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=__lowercase)
    __UpperCamelCase :Dict = self.get_dummy_inputs()
    __UpperCamelCase :Dict = pipe(**__lowercase).images
    __UpperCamelCase :Dict = image[0, -3:, -3:, -1]
    assert image.shape == (1, 128, 128, 3)
    __UpperCamelCase :Any = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
# Nightly integration tests: full-size img2img generation through onnxruntime's
# CUDA execution provider, checked against recorded golden slices.
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    '''simple docstring'''
    @property
    def UpperCamelCase__ ( self) -> str:
        # onnxruntime provider spec: (name, provider_options) tuple.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def UpperCamelCase__ ( self) -> List[str]:
        # Session options with graph optimization presumably disabled (the
        # mangled `__UpperCamelCase :str = False` lost its target; the `return
        # options` implies the original assigned to `options` / an attribute
        # on it — TODO confirm against upstream).
        __UpperCamelCase :List[Any] = ort.SessionOptions()
        __UpperCamelCase :str = False
        return options
    def UpperCamelCase__ ( self) -> Any:
        # End-to-end img2img with the default (PNDM) scheduler on CUDA.
        # NOTE(review): `__lowercase` is undefined in this scope (mangled).
        __UpperCamelCase :Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''')
        __UpperCamelCase :Optional[Any] = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        __UpperCamelCase :List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Optional[Any] = '''A fantasy landscape, trending on artstation'''
        # Fixed RNG seed so the golden slice below is reproducible.
        __UpperCamelCase :Any = np.random.RandomState(0)
        __UpperCamelCase :int = pipe(
            prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowercase , output_type='''np''' , )
        __UpperCamelCase :str = output.images
        # Central 3x3 patch of the 512x768 output.
        __UpperCamelCase :Union[str, Any] = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        __UpperCamelCase :str = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    def UpperCamelCase__ ( self) -> Dict:
        # Same end-to-end test but with an explicit LMS discrete scheduler and
        # 20 inference steps instead of the default scheduler / 10 steps.
        __UpperCamelCase :str = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''')
        __UpperCamelCase :Tuple = init_image.resize((768, 512))
        __UpperCamelCase :Optional[int] = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
        __UpperCamelCase :Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :List[Any] = '''A fantasy landscape, trending on artstation'''
        __UpperCamelCase :str = np.random.RandomState(0)
        __UpperCamelCase :List[str] = pipe(
            prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowercase , output_type='''np''' , )
        __UpperCamelCase :Dict = output.images
        __UpperCamelCase :Union[str, Any] = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        __UpperCamelCase :Dict = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 43
|
import argparse
import json
from tqdm import tqdm
def main():
    """Split raw DPR biencoder training data into two parallel files.

    Reads ``--src_path`` (a JSON list of DPR records) and writes:
      * ``--evaluation_set``: one question per line;
      * ``--gold_data_path``: tab-joined titles of the positive contexts,
        one line per record (aligned with the evaluation set).
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            # Gold evidence = titles of all positive contexts for the question.
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 43
| 1
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a ``SwinConfig`` from a timm checkpoint name.

    The timm name encodes the variant, e.g. ``swin_tiny_patch4_window7_224``:
    index 1 is the size ("tiny"/"small"/"base"/else large), index 3 ends with
    the window size, index 4 is the image size. ``in22k`` checkpoints use the
    21841-class ImageNet-22k head; otherwise ImageNet-1k labels are attached.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        # "large" (and any other size) falls through to the large settings.
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        # Attach human-readable ImageNet-1k labels from the HF hub.
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Map a timm Swin state-dict key to its HF Transformers equivalent.

    Replacements are applied in sequence; every key except the classifier
    head finally gets the ``swin.`` model prefix.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    # "attn.proj" must be handled before the generic "attn" replacement.
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        # Classification head lives outside the `swin.` backbone prefix.
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm Swin state dict in place to HF naming.

    Fused ``qkv`` tensors are split into separate query/key/value entries,
    sized by the target block's ``all_head_size``; attention-mask buffers are
    dropped (the HF model recomputes them); everything else is renamed via
    ``rename_key``. Returns the (mutated) dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Relative-position mask buffers are not parameters in the HF model.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # Fused (3*dim, dim) weight -> three (dim, dim) matrices.
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                # Fused (3*dim,) bias -> three (dim,) vectors.
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert a timm Swin checkpoint to HF format, verify, and save.

    Loads the pretrained timm model, maps its state dict into a freshly
    configured ``SwinForImageClassification``, checks that both models
    produce matching logits on a sample COCO image (atol=1e-3), then saves
    the model and image processor to ``pytorch_dump_folder_path``.
    """
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # The HF hub mirrors timm names with dashes, e.g. microsoft/swin-tiny-...
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Conversion sanity check: logits of both models must agree closely.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 43
|
from __future__ import annotations
import random
# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score a candidate string against the target.

    The score is the number of positions whose gene matches the target's
    gene at the same position. Returns ``(item, score)`` with the score as
    a float so it can later be normalized.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Single-point crossover: cut both parents at a random index and swap tails.

    Returns two children: ``parent_1``'s head + ``parent_2``'s tail, and
    vice versa. Both parents are assumed to have the target's length.
    """
    # Cut point strictly inside parent_1 (index 0 .. len-1).
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of `child`.

    NOTE(review): the mangled source dropped the assignment target of the
    `random.choice(genes)` call; restored here as the classic in-place gene
    overwrite. The `randint(0, len) - 1` index ranges over -1..len-1, so the
    last position is slightly favoured (kept as-is from the original design).
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed mutated children from ``parent_1`` and random scored partners.

    The number of child pairs is proportional to the parent's (normalized)
    fitness score, capped at 10. Each crossover produces two children, each
    of which may mutate before joining the returned population slice.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # Partner is drawn from the best N_SELECTED entries of the generation.
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one exactly matches ``target``.

    Returns ``(generation, total_population, best_string)``.
    Raises ValueError if the configuration cannot converge.
    """
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 43
| 1
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# All paths in utils/documentation_tests.txt are relative to the repo root.
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 43
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the GLUE/MRPC training example below.
# NOTE(review): names restored from the upstream accelerate example; the
# obfuscated source bound both values to the same throwaway name.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build tokenized GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: the ``Accelerator`` (used to pick TPU-safe padding).
        batch_size: per-device batch size for both splits.
        model_name: tokenizer checkpoint to load.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Fine-tune a sequence classifier on GLUE/MRPC under Accelerate/DeepSpeed.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace (``model_name_or_path``, ``output_dir``,
            ``performance_lower_bound``).

    Writes per-epoch accuracy to ``<output_dir>/all_results.json`` and
    asserts the best accuracy exceeds ``performance_lower_bound`` if set.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer via its config,
    # in which case a DummyOptim placeholder is required.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (DummyScheduler when DeepSpeed owns the schedule).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    # Drop the duplicated samples padded onto the final batch.
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch the MRPC training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 43
| 1
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_lengths=True,
    use_token_type_ids=True,
    use_labels=True,
    gelu_activation=True,
    sinusoidal_embeddings=False,
    causal=False,
    asm=False,
    n_langs=2,
    vocab_size=99,
    n_special=0,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=12,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    summary_type="last",
    use_proj=None,
    scope=None,
) -> None:
    # Store the tester hyper-parameters; `parent` is the unittest.TestCase
    # that owns this model tester (used for its assertion helpers).
    # NOTE(review): parameter names/defaults reconstructed from the body's
    # assignment order — the obfuscated source declared duplicate parameter
    # names, which is a SyntaxError.
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_lengths = use_input_lengths
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.gelu_activation = gelu_activation
    self.sinusoidal_embeddings = sinusoidal_embeddings
    self.causal = causal
    self.asm = asm
    self.n_langs = n_langs
    self.vocab_size = vocab_size
    self.n_special = n_special
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.summary_type = summary_type
    self.use_proj = use_proj
    self.scope = scope
def UpperCamelCase__ ( self) -> Optional[int]:
    """Create a config plus a full set of random model inputs/labels."""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = random_attention_mask([self.batch_size, self.seq_length])

    input_lengths = None
    if self.use_input_lengths:
        input_lengths = (
            ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
        )  # small variation of seq_length

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

    sequence_labels = None
    token_labels = None
    is_impossible_labels = None
    # Initialize so the return below is defined even when use_labels is False
    # (the obfuscated original left choice_labels unbound in that case).
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        is_impossible_labels = ids_tensor([self.batch_size], 2).float()
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = self.get_config()

    return (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    )
def get_config(self) -> "FlaubertConfig":
    """Build a FlaubertConfig from this tester's hyper-parameters.

    Named ``get_config`` because ``prepare_config_and_inputs`` calls
    ``self.get_config()``.
    """
    return FlaubertConfig(
        vocab_size=self.vocab_size,
        n_special=self.n_special,
        emb_dim=self.hidden_size,
        n_layers=self.num_hidden_layers,
        n_heads=self.num_attention_heads,
        dropout=self.hidden_dropout_prob,
        attention_dropout=self.attention_probs_dropout_prob,
        gelu_activation=self.gelu_activation,
        sinusoidal_embeddings=self.sinusoidal_embeddings,
        asm=self.asm,
        causal=self.causal,
        n_langs=self.n_langs,
        max_position_embeddings=self.max_position_embeddings,
        initializer_range=self.initializer_range,
        summary_type=self.summary_type,
        use_proj=self.use_proj,
    )
def UpperCamelCase__ (
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
) -> Union[str, Any]:
    """Instantiate FlaubertModel and check its output shape for several input combos.

    NOTE(review): parameter names reconstructed — the obfuscated source
    declared nine duplicate `__lowercase` parameters (a SyntaxError) and the
    call arguments were mangled; mapping follows prepare_config_and_inputs'
    return order.
    """
    model = FlaubertModel(config=config)
    model.to(torch_device)
    model.eval()
    # Exercise the forward pass with and without optional lengths/langs.
    result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
    result = model(input_ids, langs=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[str]:
        # Checker for FlaubertWithLMHeadModel: loss must be scalar, logits
        # must be (batch, seq, vocab).
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # original argument names were lost.
        __UpperCamelCase :Optional[int] = FlaubertWithLMHeadModel(__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Optional[int] = model(__lowercase , token_type_ids=__lowercase , labels=__lowercase)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> int:
        # Checker for FlaubertForQuestionAnsweringSimple: start/end logits
        # must both be (batch, seq).
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # original argument names were lost.
        __UpperCamelCase :int = FlaubertForQuestionAnsweringSimple(__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Union[str, Any] = model(__lowercase)
        __UpperCamelCase :Optional[int] = model(__lowercase , start_positions=__lowercase , end_positions=__lowercase)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Tuple:
        # Checker for the beam-search FlaubertForQuestionAnswering head:
        # verifies loss shape when labels are supplied and the top-k
        # start/end log-prob / index shapes in the unlabeled forward.
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # original argument names were lost.
        __UpperCamelCase :Tuple = FlaubertForQuestionAnswering(__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Union[str, Any] = model(__lowercase)
        __UpperCamelCase :Dict = model(
            __lowercase , start_positions=__lowercase , end_positions=__lowercase , cls_index=__lowercase , is_impossible=__lowercase , p_mask=__lowercase , )
        __UpperCamelCase :int = model(
            __lowercase , start_positions=__lowercase , end_positions=__lowercase , cls_index=__lowercase , is_impossible=__lowercase , )
        # Single-element tuple unpack: extracts the loss from the output tuple.
        ((__UpperCamelCase) , ) :Dict = result_with_labels.to_tuple()
        __UpperCamelCase :Union[str, Any] = model(__lowercase , start_positions=__lowercase , end_positions=__lowercase)
        ((__UpperCamelCase) , ) :Any = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , ())
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any:
        # Checker for FlaubertForSequenceClassification: scalar loss, logits
        # of shape (batch, type_sequence_label_size).
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # original argument names were lost.
        __UpperCamelCase :int = FlaubertForSequenceClassification(__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Union[str, Any] = model(__lowercase)
        __UpperCamelCase :List[Any] = model(__lowercase , labels=__lowercase)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[int]:
        # Checker for FlaubertForTokenClassification: per-token logits of
        # shape (batch, seq, num_labels).
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # original argument names were lost.
        __UpperCamelCase :Union[str, Any] = self.num_labels
        __UpperCamelCase :List[str] = FlaubertForTokenClassification(__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
        # Checker for FlaubertForMultipleChoice: tiles each input along a new
        # num_choices axis and expects logits of shape (batch, num_choices).
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # original argument names were lost.
        __UpperCamelCase :List[str] = self.num_choices
        __UpperCamelCase :int = FlaubertForMultipleChoice(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Any = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __UpperCamelCase :Tuple = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __UpperCamelCase :Dict = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __UpperCamelCase :int = model(
            __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def UpperCamelCase__ ( self) -> List[Any]:
        # Unpack prepare_config_and_inputs() and repackage the tensors into
        # the kwargs dict used by the common model tests.
        # NOTE(review): the 9-way tuple unpack below assigns every element to
        # the same obfuscated name; the real names (config, input_ids,
        # token_type_ids, input_lengths, input_mask, ...) survive only in the
        # dict literal at the bottom.
        __UpperCamelCase :Union[str, Any] = self.prepare_config_and_inputs()
        (
            (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) ,
        ) :int = config_and_inputs
        __UpperCamelCase :List[Any] = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''lengths''': input_lengths,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Model-level test suite for Flaubert (common tests + pipeline mapping).

    NOTE(review): the two base classes named ``UpperCAmelCase_`` are not
    defined in this chunk — presumably ``ModelTesterMixin`` and
    ``PipelineTesterMixin``; TODO confirm.  The two consecutive ``a__``
    class attributes shadow each other (original names — presumably
    ``all_model_classes`` and ``pipeline_model_mapping`` — were lost).
    """
    a__ : Dict = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    a__ : Optional[int] = (
        {
            """feature-extraction""": FlaubertModel,
            """fill-mask""": FlaubertWithLMHeadModel,
            """question-answering""": FlaubertForQuestionAnsweringSimple,
            """text-classification""": FlaubertForSequenceClassification,
            """token-classification""": FlaubertForTokenClassification,
            """zero-shot""": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Tuple:
        # Skip predicate for pipeline tests.
        # NOTE(review): duplicated `__lowercase` params are a SyntaxError;
        # the body references the lost names (pipeline_test_casse_name,
        # tokenizer_name).
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> int:
        # Adds zero-filled start/end position labels for the beam-search QA
        # model class on top of the common _prepare_for_class inputs.
        __UpperCamelCase :Union[str, Any] = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                __UpperCamelCase :Tuple = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowercase)
                __UpperCamelCase :Optional[int] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowercase)
        return inputs_dict
    def UpperCamelCase__ ( self) -> Any:
        # setUp: build the shared model tester and config tester.
        __UpperCamelCase :Optional[Any] = FlaubertModelTester(self)
        __UpperCamelCase :Any = ConfigTester(self , config_class=__lowercase , emb_dim=37)
    def UpperCamelCase__ ( self) -> Optional[Any]:
        self.config_tester.run_common_tests()
    def UpperCamelCase__ ( self) -> int:
        __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*__lowercase)
    def UpperCamelCase__ ( self) -> int:
        __UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*__lowercase)
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*__lowercase)
    def UpperCamelCase__ ( self) -> Dict:
        __UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*__lowercase)
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*__lowercase)
    def UpperCamelCase__ ( self) -> str:
        __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*__lowercase)
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*__lowercase)
    @slow
    def UpperCamelCase__ ( self) -> Any:
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase :Tuple = FlaubertModel.from_pretrained(__lowercase)
            self.assertIsNotNone(__lowercase)
    @slow
    @require_torch_gpu
    def UpperCamelCase__ ( self) -> Optional[int]:
        # TorchScript trace -> save -> load round-trip for each model class.
        __UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            __UpperCamelCase :int = True
            __UpperCamelCase :List[str] = model_class(config=__lowercase)
            __UpperCamelCase :Dict = self._prepare_for_class(__lowercase , __lowercase)
            __UpperCamelCase :List[str] = torch.jit.trace(
                __lowercase , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__lowercase , os.path.join(__lowercase , '''traced_model.pt'''))
                __UpperCamelCase :Optional[Any] = torch.jit.load(os.path.join(__lowercase , '''traced_model.pt''') , map_location=__lowercase)
                loaded(inputs_dict['''input_ids'''].to(__lowercase) , inputs_dict['''attention_mask'''].to(__lowercase))
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test: runs a real pretrained Flaubert checkpoint on a
    short token sequence and compares a 3x3 slice of the output hidden
    states against precomputed reference values."""
    @slow
    def UpperCamelCase__ ( self) -> Any:
        __UpperCamelCase :List[Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''')
        __UpperCamelCase :Tuple = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            __UpperCamelCase :str = model(__lowercase)[0]
        # 11 input tokens -> hidden states of shape (1, 11, 768).
        __UpperCamelCase :Optional[Any] = torch.Size((1, 11, 768))
        self.assertEqual(output.shape , __lowercase)
        __UpperCamelCase :Dict = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
| 43
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger — the Deformable DETR config class below refers to it
# as `logger` (logger.info), so the binding must carry that name.
logger = logging.get_logger(__name__)
__lowercase = logger  # keep the previous (obfuscated) binding alive

# Map of canonical checkpoint name -> hosted config URL.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
__lowercase = DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP  # previous binding
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration for Deformable DETR (backbone + deformable transformer).

    NOTE(review): the base class ``UpperCAmelCase_`` is not defined in this
    chunk — presumably ``PretrainedConfig``; TODO confirm.  The two
    consecutive ``a__`` class attributes shadow each other (original names —
    presumably ``model_type`` and ``attribute_map`` — were lost), and the
    ``__init__`` parameter names were all replaced by the duplicated
    ``__lowercase`` (a SyntaxError) while the body still uses the real
    names (``use_timm_backbone``, ``backbone_config``, ...).
    """
    a__ : List[str] = """deformable_detr"""
    a__ : Union[str, Any] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , __lowercase=True , __lowercase=None , __lowercase=3 , __lowercase=300 , __lowercase=1_024 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=256 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=True , __lowercase=False , __lowercase="sine" , __lowercase="resnet50" , __lowercase=True , __lowercase=False , __lowercase=4 , __lowercase=4 , __lowercase=4 , __lowercase=False , __lowercase=300 , __lowercase=False , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=1 , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=0.1 , __lowercase=0.25 , __lowercase=False , **__lowercase , ) -> int:
        # Backbone selection: timm backbone XOR an explicit backbone_config.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                __UpperCamelCase :str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            # NOTE(review): `isinstance(__lowercase , __lowercase)` compares a
            # value against itself — presumably this was
            # `isinstance(backbone_config, dict)`; TODO confirm.
            elif isinstance(__lowercase , __lowercase):
                __UpperCamelCase :str = backbone_config.get('''model_type''')
                __UpperCamelCase :Tuple = CONFIG_MAPPING[backbone_model_type]
                __UpperCamelCase :Any = config_class.from_dict(__lowercase)
        # Plain attribute copies of the constructor arguments.
        # NOTE(review): the real attribute names were lost — the obfuscated
        # `__UpperCamelCase` targets below presumably were `self.<arg_name>`.
        __UpperCamelCase :int = use_timm_backbone
        __UpperCamelCase :Dict = backbone_config
        __UpperCamelCase :Any = num_channels
        __UpperCamelCase :Optional[int] = num_queries
        __UpperCamelCase :Any = max_position_embeddings
        __UpperCamelCase :str = d_model
        __UpperCamelCase :Tuple = encoder_ffn_dim
        __UpperCamelCase :Union[str, Any] = encoder_layers
        __UpperCamelCase :List[Any] = encoder_attention_heads
        __UpperCamelCase :Any = decoder_ffn_dim
        __UpperCamelCase :List[str] = decoder_layers
        __UpperCamelCase :int = decoder_attention_heads
        __UpperCamelCase :str = dropout
        __UpperCamelCase :Any = attention_dropout
        __UpperCamelCase :int = activation_dropout
        __UpperCamelCase :List[Any] = activation_function
        __UpperCamelCase :List[Any] = init_std
        __UpperCamelCase :List[Any] = init_xavier_std
        __UpperCamelCase :int = encoder_layerdrop
        __UpperCamelCase :str = auxiliary_loss
        __UpperCamelCase :Optional[Any] = position_embedding_type
        __UpperCamelCase :Union[str, Any] = backbone
        __UpperCamelCase :Any = use_pretrained_backbone
        __UpperCamelCase :str = dilation
        # deformable attributes
        __UpperCamelCase :Optional[Any] = num_feature_levels
        __UpperCamelCase :str = encoder_n_points
        __UpperCamelCase :int = decoder_n_points
        __UpperCamelCase :Union[str, Any] = two_stage
        __UpperCamelCase :Optional[Any] = two_stage_num_proposals
        __UpperCamelCase :Dict = with_box_refine
        # Two-stage decoding requires box refinement.
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        __UpperCamelCase :Optional[int] = class_cost
        __UpperCamelCase :List[Any] = bbox_cost
        __UpperCamelCase :str = giou_cost
        # Loss coefficients
        __UpperCamelCase :Tuple = mask_loss_coefficient
        __UpperCamelCase :Tuple = dice_loss_coefficient
        __UpperCamelCase :int = bbox_loss_coefficient
        __UpperCamelCase :Any = giou_loss_coefficient
        __UpperCamelCase :Dict = eos_coefficient
        __UpperCamelCase :Optional[Any] = focal_alpha
        __UpperCamelCase :Optional[Any] = disable_custom_kernels
        super().__init__(is_encoder_decoder=__lowercase , **__lowercase)
    @property
    def UpperCamelCase__ ( self) -> int:
        # Alias property: num_attention_heads -> encoder_attention_heads.
        return self.encoder_attention_heads
    @property
    def UpperCamelCase__ ( self) -> int:
        # Alias property: hidden_size -> d_model.
        # NOTE(review): both properties carry the same obfuscated name, so
        # the second shadows the first in the class namespace.
        return self.d_model
    def UpperCamelCase__ ( self) -> List[Any]:
        # Serialize to a plain dict, nesting the backbone config if present.
        __UpperCamelCase :Dict = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            __UpperCamelCase :Tuple = self.backbone_config.to_dict()
        __UpperCamelCase :List[Any] = self.__class__.model_type
        return output
| 43
| 1
|
from collections.abc import Callable
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase = None) -> None:
# Stores actual heap items.
__UpperCamelCase :list = []
# Stores indexes of each item for supporting updates and deletion.
__UpperCamelCase :dict = {}
# Stores current size of heap.
__UpperCamelCase :str = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
__UpperCamelCase :int = key or (lambda __lowercase: x)
def UpperCamelCase__ ( self , __lowercase) -> int | None:
return int((i - 1) / 2) if i > 0 else None
def UpperCamelCase__ ( self , __lowercase) -> int | None:
__UpperCamelCase :Union[str, Any] = int(2 * i + 1)
return left if 0 < left < self.size else None
def UpperCamelCase__ ( self , __lowercase) -> int | None:
__UpperCamelCase :Optional[int] = int(2 * i + 2)
return right if 0 < right < self.size else None
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> None:
__UpperCamelCase , __UpperCamelCase :Optional[int] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__UpperCamelCase , __UpperCamelCase :List[Any] = self.arr[j], self.arr[i]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> bool:
return self.arr[i][1] < self.arr[j][1]
def UpperCamelCase__ ( self , __lowercase) -> int:
__UpperCamelCase :List[str] = self._left(__lowercase)
__UpperCamelCase :Tuple = self._right(__lowercase)
__UpperCamelCase :List[str] = i
if left is not None and not self._cmp(__lowercase , __lowercase):
__UpperCamelCase :Optional[Any] = left
if right is not None and not self._cmp(__lowercase , __lowercase):
__UpperCamelCase :Optional[Any] = right
return valid_parent
def UpperCamelCase__ ( self , __lowercase) -> None:
__UpperCamelCase :Optional[int] = self._parent(__lowercase)
while parent is not None and not self._cmp(__lowercase , __lowercase):
self._swap(__lowercase , __lowercase)
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = parent, self._parent(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> None:
__UpperCamelCase :List[str] = self._get_valid_parent(__lowercase)
while valid_parent != index:
self._swap(__lowercase , __lowercase)
__UpperCamelCase , __UpperCamelCase :Dict = valid_parent, self._get_valid_parent(__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> None:
if item not in self.pos_map:
return
__UpperCamelCase :Any = self.pos_map[item]
__UpperCamelCase :Union[str, Any] = [item, self.key(__lowercase)]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__lowercase)
self._heapify_down(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> None:
if item not in self.pos_map:
return
__UpperCamelCase :Any = self.pos_map[item]
del self.pos_map[item]
__UpperCamelCase :Any = self.arr[self.size - 1]
__UpperCamelCase :Optional[int] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__lowercase)
self._heapify_down(__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> None:
__UpperCamelCase :Optional[Any] = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(__lowercase)])
else:
__UpperCamelCase :Optional[Any] = [item, self.key(__lowercase)]
__UpperCamelCase :List[Any] = self.size
self.size += 1
self._heapify_up(self.size - 1)
def UpperCamelCase__ ( self) -> tuple | None:
return self.arr[0] if self.size else None
def UpperCamelCase__ ( self) -> tuple | None:
__UpperCamelCase :Any = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
def lowerCamelCase ( ):
    # NOTE(review): this function body is empty apart from its docstring —
    # presumably the original demo/doctest code was stripped; TODO restore.
    '''simple docstring'''
# Run any doctests in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 43
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text-classification agent tool backed by a BART-large MNLI
    checkpoint.

    Each candidate label is scored as an NLI hypothesis ("This example is
    <label>") against the input text; the most likely label is returned.
    Fixes over the previous revision: the class attributes were all named
    ``a__`` (shadowing each other — names restored from the PipelineTool
    API), the base class was the undefined ``UpperCAmelCase_`` (restored to
    ``PipelineTool``, imported above), ``config.idalabel`` is not a config
    attribute (``id2label`` is), and the ``self.entailment_id`` assignment
    target had been lost.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Locate the entailment class index in the model config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs — one pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the label whose pair scored highest on the entailment logit."""
        logits = outputs.logits
        # Column 2 is read as the entailment logit — presumably it matches
        # the `self.entailment_id` located in setup(); TODO confirm.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]


# Backward-compatible alias for the previous (obfuscated) class name.
lowerCamelCase_ = TextClassificationTool
| 43
| 1
|
class RadixNode:
    """A node of a radix (compressed prefix) tree over strings.

    Each edge is keyed by the first character of the child's prefix; a node
    terminates a stored word when ``is_leaf`` is True.

    Fixes over the previous revision: the class had been renamed away from
    ``RadixNode`` even though every usage (including inside this class)
    still says ``RadixNode``, and the method names had all been collapsed to
    one identifier with duplicated parameter names (a SyntaxError).  Names
    are restored to match the call sites (``self.match``, ``self.insert``,
    ``self.insert_many``, ``self.find``, ``self.delete``,
    ``self.print_tree``).
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Edges: first character of the child's prefix -> child node.
        self.nodes: dict[str, RadixNode] = {}
        # True when the path down to (and including) this node spells a word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Split against `word`: (common prefix, rest of self.prefix, rest of word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word in `words` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert `word` below this node."""
        # Case 1: the word equals this node's prefix -> mark as terminator.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no edge shares the first character -> new child holds the word.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the child's prefix matched fully -> recurse with the rest.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: partial match -> split the edge with an intermediate node.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True when `word` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Unconsumed edge prefix means the word is not in the tree.
        if remaining_prefix != "":
            return False
        # Word fully consumed: present only if the node is a terminator.
        elif remaining_word == "":
            return incoming_node.is_leaf
        # Word remaining: keep searching below.
        else:
            return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove `word`; return True when it was present and deleted."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        if remaining_prefix != "":
            return False
        elif remaining_word != "":
            return incoming_node.delete(remaining_word)
        else:
            # `word` is only a prefix of stored words; nothing to delete.
            if not incoming_node.is_leaf:
                return False
            else:
                if len(incoming_node.nodes) == 0:
                    # Leaf with no children: drop the edge entirely.
                    del self.nodes[word[0]]
                    # Merge self with an only child when self is a pass-through.
                    if len(self.nodes) == 1 and not self.is_leaf:
                        merging_node = list(self.nodes.values())[0]
                        self.is_leaf = merging_node.is_leaf
                        self.prefix += merging_node.prefix
                        self.nodes = merging_node.nodes
                elif len(incoming_node.nodes) > 1:
                    # Several children remain: just clear the terminator flag.
                    incoming_node.is_leaf = False
                else:
                    # Exactly one child: merge it into the deleted word's node.
                    merging_node = list(incoming_node.nodes.values())[0]
                    incoming_node.is_leaf = merging_node.is_leaf
                    incoming_node.prefix += merging_node.prefix
                    incoming_node.nodes = merging_node.nodes
                return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


# Backward-compatible alias for the previous (obfuscated) class name.
lowerCamelCase_ = RadixNode
def test_trie() -> bool:
    """Self-test: build a radix tree, query it, and delete from it.

    NOTE(review): restores the real function/variable names — the previous
    revision passed the undefined name ``SCREAMING_SNAKE_CASE`` everywhere
    and shared one obfuscated def name with the other module functions; the
    call site ``assert test_trie()`` fixes this function's true name.
    """
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find('''bandanas''')
    assert not root.find('''apps''')
    root.delete('''all''')
    assert not root.find('''all''')
    root.delete('''banana''')
    assert not root.find('''banana''')
    assert root.find('''bananas''')
    return True
def pytests() -> None:
    """Run the radix-tree self-test (entry point for test collection).

    NOTE(review): the previous revision reused the same obfuscated name as
    the sibling functions; `pytests` is the conventional name for this
    wrapper — TODO confirm against the original file.
    """
    assert test_trie()
def main() -> None:
    """Demo driver: build the example radix tree and pretty-print it.

    NOTE(review): restores the name implied by the ``main()`` call in the
    ``__main__`` guard below and the lost local names (the previous
    revision passed the undefined ``SCREAMING_SNAKE_CASE``).
    """
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words)

    print('''Words:''' , words)
    print('''Tree:''')
    root.print_tree()


if __name__ == "__main__":
    main()
| 43
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make this diffusers test module reproducible — presumably seeds RNGs and
# enables deterministic kernels (see diffusers testing utils); TODO confirm.
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast (tiny-model) test suite for StableUnCLIPImgaImgPipeline.

    NOTE(review): the three base classes named ``UpperCAmelCase_`` are not
    defined in this chunk — presumably the Pipeline*TesterMixin classes
    imported above; TODO confirm.  The repeated ``a__`` class attributes
    shadow each other (original names lost).
    """
    a__ : int = StableUnCLIPImgaImgPipeline
    a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a__ : Optional[Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    a__ : int = frozenset([] )
    def UpperCamelCase__ ( self) -> Tuple:
        # Build tiny pipeline components (image encoder, text encoder, unet,
        # schedulers, vae) with fixed seeds.
        # NOTE(review): the first two assignment targets were lost —
        # presumably `embedder_hidden_size = 32` and
        # `embedder_projection_dim = embedder_hidden_size`; as written,
        # `embedder_hidden_size` is referenced while undefined. TODO restore.
        __UpperCamelCase :Tuple = 32
        __UpperCamelCase :Optional[int] = embedder_hidden_size
        # image encoding components
        __UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
        # regular denoising components
        torch.manual_seed(0)
        __UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
        __UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        __UpperCamelCase :Dict = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
        torch.manual_seed(0)
        __UpperCamelCase :List[Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Tuple = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
        torch.manual_seed(0)
        __UpperCamelCase :List[str] = AutoencoderKL()
        __UpperCamelCase :Tuple = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def UpperCamelCase__ ( self , __lowercase , __lowercase=0 , __lowercase=True) -> str:
        # Deterministic dummy inputs; converts the random image tensor to a
        # PIL image when `pil_image` is set.
        if str(__lowercase).startswith('''mps'''):
            __UpperCamelCase :Union[str, Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase)).to(__lowercase)
        if pil_image:
            # Map [-1, 1] -> [0, 1], clamp, and convert to PIL.
            __UpperCamelCase :List[Any] = input_image * 0.5 + 0.5
            __UpperCamelCase :Optional[Any] = input_image.clamp(0 , 1)
            __UpperCamelCase :int = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            __UpperCamelCase :Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # End-to-end CPU run with image_embeds=None; checks a 3x3 output slice.
        __UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Tuple = self.get_dummy_components()
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline(**__lowercase)
        __UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
        inputs.update({'''image_embeds''': None})
        __UpperCamelCase :Any = sd_pipe(**__lowercase).images
        __UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def UpperCamelCase__ ( self) -> str:
        # Attention-slicing equivalence; exact comparison only on cpu/mps.
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)
    def UpperCamelCase__ ( self) -> List[Any]:
        # Batch-of-one equivalence; exact comparison only on cpu/mps.
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=__lowercase)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_(unittest.TestCase):
    """Slow GPU integration tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): every method had been renamed to the same mangled
    identifier (so only the last survived) and all locals read a never-bound
    name; names are restored from the upstream diffusers test file — confirm
    against upstream.
    """

    def tearDown(self):
        # clean up the VRAM after each test (``super().tearDown()`` in the
        # original grounds this method as ``tearDown``)
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        """Compare the CLIP-L img2img checkpoint against a reference output."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
        # ``torch.floataa`` does not exist; fp16 per the reference file name.
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        """Compare the CLIP-H img2img checkpoint against a reference output."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        """Peak VRAM with attention slicing + sequential offload stays < 7 GB."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image, "anime turtle", num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43
| 1
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Test constants. NOTE(review): mangling had collapsed three distinct
# constants into a single rebound name while the tests below still reference
# URL / CONTENT / HASH — names restored, values unchanged.
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowerCamelCase_:
    """Minimal stand-in for a ``requests`` response object.

    NOTE(review): attribute/method names restored from the upstream datasets
    test (the mangled version also passed the kwargs dict to ``bytes()``,
    a TypeError; it should stream the module-level CONTENT constant).
    """

    status_code = 2_0_0
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Single-chunk body matching the CONTENT constant.
        return [bytes(CONTENT, "utf-8")]


# Backward-compatible alias: the mock factory below references this name.
MockResponse = lowerCamelCase_
def lowerCamelCase(*args, **kwargs):
    """Drop-in replacement for ``requests.request`` returning the mock response.

    NOTE(review): the mangled signature used the same name for ``*args`` and
    ``**kwargs`` — a SyntaxError; varargs names are not caller-visible, so
    renaming them is safe.
    """
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def lowerCamelCase(monkeypatch, urls_type, tmp_path):
    """DownloadManager.download: str/list/dict inputs, cache layout, metadata.

    NOTE(review): the mangled signature repeated one parameter name (a
    SyntaxError) and broke pytest fixture resolution; ``monkeypatch`` and
    ``tmp_path`` are grounded by their uses in the body, ``urls_type`` by the
    parametrize id.
    """
    import requests
    # Route every HTTP call to the canned mock response defined above.
    monkeypatch.setattr(requests, "request", lambda *args, **kwargs: lowerCamelCase_())
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    # use_etag=False so the recorded metadata etag is None (asserted below).
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize all three input shapes to parallel lists of paths and urls.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            # Sidecar JSON metadata written next to the downloaded file.
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def lowerCamelCase(paths_type, xz_file, text_file):
    """DownloadManager.extract: str/list/dict inputs and extraction cache layout.

    NOTE(review): duplicate mangled parameter names (SyntaxError) fixed;
    ``xz_file`` and ``text_file`` fixture names are grounded by the
    ``xz_file.parent`` / ``text_file.read_text()`` reads in the body.
    """
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    # NOTE(review): mangling lost the use_etag value; False matches the
    # ``etag=None`` hash assertion below — TODO confirm against upstream.
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    # Normalize all three input shapes to parallel lists.
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(SCREAMING_SNAKE_CASE , start=1 ):
__UpperCamelCase :Tuple = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def lowerCamelCase(archive_jsonl, request):
    """iter_archive yields exactly two JSONL members from tar/zip archives.

    NOTE(review): fixture names restored (``request.getfixturevalue`` in the
    body grounds ``request``; the parametrize id grounds ``archive_jsonl``).
    """
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def lowerCamelCase(archive_nested_jsonl, request):
    """iter_archive can be nested: one inner archive containing two JSONLs."""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        # The outer member is itself an archive; iterate it in turn.
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def lowerCamelCase(data_dir_with_hidden_files):
    """iter_files yields the two visible files, in order, skipping hidden ones.

    NOTE(review): the fixture name is reconstructed — TODO confirm against the
    upstream conftest. Fixed a mangling bug: ``os.path.basename`` was applied
    to the input directory instead of each yielded file.
    """
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 43
|
import numpy as np
def lowerCamelCase(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Power iteration: dominant eigenvalue/eigenvector of ``input_matrix``.

    Repeatedly multiplies and renormalizes ``vector``, estimating the largest
    eigenvalue with the Rayleigh quotient until the relative change falls
    below ``error_tol`` or ``max_iterations`` is reached.

    NOTE(review): the mangled signature repeated one parameter name four
    times (a SyntaxError); names restored from the body's algorithm.

    :param input_matrix: square real-symmetric or complex-Hermitian matrix
    :param vector: nonzero starting vector of matching length
    :return: (eigenvalue, eigenvector) — eigenvalue is real for Hermitian input
    """
    # Ensure the matrix is square and matches the vector's dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Convergence is declared when iterations are exhausted or the relative
    # eigenvalue change becomes small.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector, then normalize the result.
        w = np.dot(input_matrix, vector)
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (fast because the vector is already normalized).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # A Hermitian matrix has real eigenvalues; drop numerical imaginary dust.
        lambda_ = np.real(lambda_)
    return lambda_, vector


# Backward-compatible alias: the self-test below calls ``power_iteration``.
power_iteration = lowerCamelCase
def test_power_iteration():
    """Self-test: power iteration matches numpy's ``eigh`` on real and
    Hermitian-complex 3x3 examples.

    Renamed from the mangled ``lowerCamelCase``: the ``__main__`` guard calls
    ``test_power_iteration``. Also fixed ``np.complexaaa`` (nonexistent) to
    ``np.complex128``.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian complex matrix from the real one: add a strictly
    # upper-triangular imaginary part and subtract its transpose.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy reference: eigh handles symmetric/Hermitian matrices and
        # returns eigenvalues in ascending order.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one; last column is its eigenvector.
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are only unique up to sign, so compare absolute values.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``test_power_iteration`` is not defined under that name in
    # this file as mangled (the def above was renamed) — this call will
    # NameError until the name is restored.
    test_power_iteration()
| 43
| 1
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Module logger and the fixed per-segment spectrogram length. NOTE(review):
# mangling had collapsed both into one rebound name while the pipeline class
# below references ``logger`` and ``TARGET_FEATURE_LENGTH``.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class lowerCamelCase_(UpperCAmelCase_):
    """Spectrogram-diffusion pipeline: note tokens and the previously
    generated spectrogram segment are encoded, a film-conditioned T5 decoder
    denoises the next segment, and MelGAN (optional) vocodes the result.

    NOTE(review): the mangled ``__init__`` repeated one parameter name five
    times (a SyntaxError); parameter and method names are reconstructed from
    the ``register_modules`` keywords and the internal call sites
    (``self.scale_features``, ``self.encode``, ``self.decode`` …).
    """

    # NOTE(review): mangled attribute name; upstream this is ``_optional_components``.
    a__ : Tuple = ["""melgan"""]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly rescale spectrogram features into ``output_range``."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert ``scale_features``: map model outputs back to feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders and return their (encoding, mask) pairs."""
        # Token id 0 is padding; everything else participates.
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """One decoder forward pass at diffusion time ``noise_time``."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio segment-by-segment from a list of note-token chunks.

        Each chunk is conditioned on the previous chunk's predicted
        spectrogram; the concatenated spectrogram is optionally vocoded by
        MelGAN when ``output_type == "numpy"``.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            # The denoised segment becomes the context for the next chunk.
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
| 43
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): mangling had collapsed three distinct module globals into one
# rebound name; the tokenizer class below reads VOCAB_FILES_NAMES and
# PRETRAINED_VOCAB_FILES_MAP, so the names are restored (values unchanged).
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class lowerCamelCase_(UpperCAmelCase_):
    """Fast (tokenizers-backed) tokenizer for BLOOM models.

    NOTE(review): the mangled ``__init__`` repeated one parameter name (a
    SyntaxError); parameter and method names are reconstructed from the
    ``super()._batch_encode_plus`` / ``super()._encode_plus`` calls and the
    upstream transformers implementation — confirm against upstream.
    """

    # NOTE(review): mangled attribute names; upstream these are
    # vocab_files_names / pretrained_vocab_files_map / model_input_names /
    # slow_tokenizer_class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its recorded add_prefix_space
        # setting disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pretokenized input is only meaningful with add_prefix_space=True.
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Same guard as _batch_encode_plus, for single-sequence encoding.
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Persist the backend tokenizer model files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        keeping only the most recent ``model_max_length`` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 43
| 1
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCamelCase_:
    """Builds a ``DecisionTransformerConfig`` plus random model inputs for the
    unit tests below.

    NOTE(review): the mangled ``__init__`` repeated one parameter name (a
    SyntaxError) and lost all attribute assignments; names are restored from
    the attribute reads and the method-call sites (``self.get_config()``,
    ``self.prepare_config_and_inputs()``) still present in the code.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Random tensors shaped per the config, plus the config itself."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        """Forward the model and check every prediction head's shape."""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates this name.
DecisionTransformerModelTester = lowerCamelCase_
@require_torch
class lowerCamelCase_(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase):
    """Common-suite tests for ``DecisionTransformerModel``.

    NOTE(review): the mangled class-level flags (``a__``) and method names are
    restored to the mixin attribute names they configure upstream — confirm
    against the upstream transformers test file.
    """

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        # The later methods read these attributes, which grounds the names.
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class lowerCamelCase_(unittest.TestCase):
    """Integration test: short autoregressive rollout against pinned outputs.

    NOTE(review): locals were mangled (``torch.floataa`` does not exist;
    reconstructed as float32 per upstream) — confirm against upstream.
    """

    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        # Pinned action predictions for the two rollout steps.
        expected_outputs = torch.tensor(
            [[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            # Append a zero placeholder for the action/reward being predicted.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1E-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
| 43
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__lowercase = logging.get_logger(__name__)
# Map of model identifiers to their hosted config files.
# NOTE(review): `__lowercase` is rebound here, shadowing the logger above —
# an obfuscation artifact; the intended names were presumably `logger` and
# `CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm against upstream.
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration for a CTRL-style model.

    Holds the model hyper-parameters; any extra keyword arguments are
    forwarded to the base configuration class.

    NOTE(review): the original bound all three class attributes to the same
    name ``a__`` (so only the last survived) and declared every ``__init__``
    parameter as ``__lowercase`` (a SyntaxError).  The names below follow the
    PretrainedConfig conventions the attribute values clearly encode.
    """

    # Identifier used by the auto classes.
    model_type = """ctrl"""
    # Outputs the generation loop should not treat as model outputs.
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Map of canonical config attribute names to this model's names.
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        """Store hyper-parameters and forward the rest to the base class.

        Args:
            vocab_size: Size of the token vocabulary.
            n_positions: Maximum sequence length the model can attend to.
            n_embd: Hidden size of the embeddings and transformer layers.
            dff: Inner dimension of the feed-forward blocks.
            n_layer: Number of transformer layers.
            n_head: Number of attention heads.
            resid_pdrop: Dropout on residual connections.
            embd_pdrop: Dropout on the embeddings.
            layer_norm_epsilon: Epsilon used by the layer-norm layers.
            initializer_range: Stddev for weight initialization.
            use_cache: Whether the model returns past key/values.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 43
| 1
|
def lowerCamelCase ( discount_rate , cash_flows ):
    """Return the present value of ``cash_flows`` at ``discount_rate``.

    Cash flow ``i`` is discounted by ``(1 + discount_rate) ** i`` (the first
    flow is at time 0 and is not discounted).

    Args:
        discount_rate: Per-period discount rate; must be non-negative.
        cash_flows: Non-empty sequence of cash flows, one per period.

    Returns:
        The present value, rounded to 2 decimal places.

    Raises:
        ValueError: If ``discount_rate`` is negative or ``cash_flows`` is empty.
    """
    # The original obfuscated signature declared both parameters as
    # SCREAMING_SNAKE_CASE (a SyntaxError); the body's names are restored here.
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 43
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast CPU tests for `TextToVideoSDPipeline` built from tiny dummy components.

    NOTE(review): this file looks machine-obfuscated.  The four class
    attributes below are all bound to the same name ``a__`` (only the last
    binding survives — presumably `pipeline_class`, `params`, `batch_params`,
    `required_optional_params`), and several parameters/locals were renamed to
    placeholders (`__lowercase`, `__UpperCamelCase`).  Confirm against the
    upstream diffusers test file before relying on this code.
    """
    a__ : str = TextToVideoSDPipeline
    a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
    a__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    a__ : int = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Build the tiny dummy components (unet/scheduler/vae/text_encoder/tokenizer)
        # used by the fast tests; seeds are fixed for reproducibility.
        torch.manual_seed(0)
        __UpperCamelCase :str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        # NOTE(review): clip_sample/set_alpha_to_one below reference the module-level
        # `__lowercase` placeholder — the intended boolean values were lost; verify upstream.
        __UpperCamelCase :Optional[int] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        __UpperCamelCase :Optional[Any] = CLIPTextModel(__lowercase)
        __UpperCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        __UpperCamelCase :Union[str, Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[int]:
        # NOTE(review): duplicate parameter name `__lowercase` is a SyntaxError;
        # the parameters were presumably (device, seed=0) — confirm upstream.
        if str(__lowercase).startswith('''mps'''):
            __UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :Dict = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Smoke test: run the pipeline end-to-end on CPU and compare a 3x3
        # slice of the first frame against hard-coded reference values.
        __UpperCamelCase :int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Optional[int] = self.get_dummy_components()
        __UpperCamelCase :Dict = TextToVideoSDPipeline(**__lowercase)
        __UpperCamelCase :Any = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowercase)
        __UpperCamelCase :int = '''np'''
        __UpperCamelCase :List[str] = sd_pipe(**__lowercase).frames
        __UpperCamelCase :Optional[Any] = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        __UpperCamelCase :str = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def UpperCamelCase__ ( self) -> Tuple:
        # Attention-slicing output must match the regular forward pass.
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3E-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Optional[int]:
        # xFormers memory-efficient attention must match the vanilla forward pass.
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1E-2)
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Dict:
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
    def UpperCamelCase__ ( self) -> str:
        pass
    def UpperCamelCase__ ( self) -> List[str]:
        # Delegate to the mixin's progress-bar behavior test.
        return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration tests running the real damo-vilab text-to-video
    checkpoint on CUDA and comparing against reference videos.

    The original obfuscated source gave both methods the same name (the
    second silently clobbered the first) and dropped the assignment targets;
    distinct `test_*` names restore both tests and unittest discovery.
    """

    def test_full_model(self) -> Dict:
        """25-step generation with DPMSolver matches the reference video."""
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        # Swap in the faster multistep solver for the full-length run.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to('''cuda''')
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='''pt''').frames
        video = video_frames.cpu().numpy()
        # Mean absolute error against the stored reference video.
        assert np.abs(expected_video - video).mean() < 5E-2

    def test_two_step_model(self) -> int:
        """Cheap 2-step generation with the default scheduler matches its reference."""
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        pipe = pipe.to('''cuda''')
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='''pt''').frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
| 43
| 1
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase ( ):
    """Patching `os.path.join` in a submodule must update every alias
    (`os.path.join`, `path.join`, `join`, and their `renamed_*` twins) and
    restore all of them once the patch exits.

    The obfuscated original asserted against a never-bound name `mock`;
    the mock value is now bound and passed to `patch_submodule`.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def lowerCamelCase ( ):
    """Patching the builtin `open` referenced by a module's globals swaps it
    for the mock inside the context and restores it afterwards."""
    assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def lowerCamelCase ( ):
    """Patching an attribute of a module the target never imported
    (pandas.read_csv here) must be a silent no-op, not an error."""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
        pass
def lowerCamelCase ( ):
    """Patching a builtin the target module never references installs the mock
    while active, then the builtin resolves normally again afterwards."""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
        assert _test_patching.len is mock
    # outside the patch, attribute lookup falls through to the real builtin
    assert _test_patching.len is len
def lowerCamelCase ( ):
    """`patch_submodule` used imperatively via start()/stop() behaves exactly
    like the context-manager form."""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def lowerCamelCase ( ):
    """Nested patches of sibling attributes (join/dirname/rename) compose in
    any order and all unwind correctly.

    The obfuscated original asserted against never-bound names
    mock_join/mock_dirname/mock_rename; they are bound and passed here.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def lowerCamelCase ( ):
    """Patching attributes of modules that do not exist at all must not raise."""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
        pass
| 43
|
def z_function ( input_str ):
    """Compute the Z-array of ``input_str``.

    ``z[i]`` is the length of the longest substring starting at ``i`` that is
    also a prefix of the whole string (``z[0]`` is left at 0 by convention).
    Runs in O(n) via the standard two-pointer interval technique.

    The obfuscated original defined this as ``lowerCamelCase`` while the
    sibling ``find_pattern`` calls ``z_function``, and it passed the string
    three times to the extension helper; both are fixed, and the one-character
    extension check is inlined.
    """
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        # naively extend the prefix match one character at a time
        while i + z_result[i] < len(input_str ) and input_str[z_result[i]] == input_str[i + z_result[i]]:
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next ( i , z_result , s ):
    """Return True while the prefix match at position ``i`` can be extended by
    one more character (used by the Z-function's naive extension loop).

    The obfuscated original declared three identical parameter names (a
    SyntaxError) and was defined as ``lowerCamelCase`` although the Z-function
    calls it as ``go_next``; both are restored here.
    """
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern ( pattern , input_str ):
    """Count occurrences of ``pattern`` in ``input_str`` using the Z-function.

    The Z-array of ``pattern + input_str`` has a value >= len(pattern) exactly
    at positions where the pattern matches.

    The obfuscated original declared both parameters with the same name (a
    SyntaxError); the body's names ``pattern``/``input_str`` are restored.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 43
| 1
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowercase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export ( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    """Export ``model`` to an ONNX file at ``output_path``.

    Creates the parent directory if needed and picks the right
    ``torch.onnx.export`` signature for the installed torch version.

    Args:
        model: The torch module to export.
        model_args: Example inputs traced through the model.
        output_path: Destination ``pathlib.Path`` for the .onnx file.
        ordered_input_names: Names for the graph inputs, in order.
        output_names: Names for the graph outputs.
        dynamic_axes: Mapping of input name -> dynamic axis indices.
        opset: ONNX operator-set version to target.
        use_external_data_format: Pre-1.11-only flag for >2GB models.

    NOTE(review): the obfuscated original declared eight identical parameter
    names (a SyntaxError); the names above are restored from the body's reads.
    """
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models ( model_path , output_path , opset , fp16 = False ):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    Args:
        model_path: Local directory or Hub id of the diffusers checkpoint.
        output_path: Directory that will receive ``vae_decoder/model.onnx``.
        opset: ONNX operator-set version.
        fp16: Export in float16 (requires a CUDA device).

    Raises:
        ValueError: If ``fp16`` is requested without CUDA available.

    NOTE(review): reconstructed from the obfuscated original, whose parameters
    all shared one name and whose dtype expression read ``torch.floataa`` in
    both branches.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        device = '''cpu'''
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    # Command-line entry point: parse arguments and export the VAE decoder.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_path''',
        type=str,
        required=True,
        help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
    )
    parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--opset''',
        default=14,
        type=int,
        help='''The version of the ONNX operator set to use.''',
    )
    parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    # argparse stores `--fp16` under the attribute `fp16`
    # (the obfuscated original read the never-existing `args.fpaa`).
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print('''SD: Done: ONNX''')
| 43
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Module logger and the fixed number of mel frames generated per segment;
# the pipeline class below reads these as `logger` and `TARGET_FEATURE_LENGTH`,
# which the obfuscated original never bound.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Pipeline rendering encoded note tokens into audio.

    Chains a notes encoder, a continuous (context) encoder, a FiLM decoder
    driven by a DDPM scheduler, and a MelGAN vocoder.

    NOTE(review): machine-obfuscated source — `__init__` and `__call__` below
    declare several parameters all named ``__lowercase`` (a SyntaxError), and
    most assignment targets were rewritten to the placeholder
    ``__UpperCamelCase`` even though later lines read meaningful names
    (`pred_mel`, `ones`, `mel`, `encoder_continuous_inputs`, ...).  Restore
    from the upstream `SpectrogramDiffusionPipeline` before relying on this.
    """
    a__ : Tuple = ["""melgan"""]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> None:
        # presumably (notes_encoder, continuous_encoder, decoder, scheduler, melgan) — confirm upstream
        super().__init__()
        # From MELGAN
        __UpperCamelCase :int = math.log(1E-5) # Matches MelGAN training.  presumably self.min_value
        __UpperCamelCase :int = 4.0 # Largest value for most examples  presumably self.max_value
        __UpperCamelCase :str = 128 # presumably self.n_dims (mel feature dimension) — confirm upstream
        self.register_modules(
            notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )
    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Dict:
        # Linearly map features from [self.min_value, self.max_value] into
        # `output_range`, optionally clipping to the valid range first.
        __UpperCamelCase , __UpperCamelCase :str = output_range
        if clip:
            __UpperCamelCase :Union[str, Any] = torch.clip(__lowercase , self.min_value , self.max_value)
        # Scale to [0, 1].
        __UpperCamelCase :Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Optional[int]:
        # Inverse of the scaler above: map network outputs from `input_range`
        # back to [self.min_value, self.max_value].
        __UpperCamelCase , __UpperCamelCase :int = input_range
        __UpperCamelCase :Optional[int] = torch.clip(__lowercase , __lowercase , __lowercase) if clip else outputs
        # Scale to [0, 1].
        __UpperCamelCase :List[str] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
        # Run both encoders; note tokens are masked where the id is 0 (padding).
        __UpperCamelCase :List[str] = input_tokens > 0
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.notes_encoder(
            encoder_input_tokens=__lowercase , encoder_inputs_mask=__lowercase)
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.continuous_encoder(
            encoder_inputs=__lowercase , encoder_inputs_mask=__lowercase)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
        # One reverse-diffusion step: normalize `noise_time` to a per-batch
        # long tensor and run the FiLM decoder on the noisy input tokens.
        __UpperCamelCase :Optional[int] = noise_time
        if not torch.is_tensor(__lowercase):
            __UpperCamelCase :str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(__lowercase) and len(timesteps.shape) == 0:
            __UpperCamelCase :Dict = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        __UpperCamelCase :List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        __UpperCamelCase :Tuple = self.decoder(
            encodings_and_masks=__lowercase , decoder_input_tokens=__lowercase , decoder_noise_time=__lowercase)
        return logits
    @torch.no_grad()
    def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        # Generate audio segment-by-segment: each token chunk is denoised
        # conditioned on the previous chunk's predicted mel spectrogram, then
        # the accumulated mel is vocoded with MelGAN when output_type=="numpy".
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__lowercase)}.""")
        __UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
        __UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
        __UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
        for i, encoder_input_tokens in enumerate(__lowercase):
            if i == 0:
                __UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                __UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                __UpperCamelCase :Tuple = ones
            __UpperCamelCase :Optional[Any] = self.scale_features(
                __lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
            __UpperCamelCase :int = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            __UpperCamelCase :int = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(__lowercase)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                __UpperCamelCase :Optional[int] = self.decode(
                    encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                __UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
            __UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
            __UpperCamelCase :List[Any] = mel[:1]
            __UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
            # Append this segment's prediction to the running full-song mel.
            __UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowercase , __lowercase)
            logger.info('''Generated segment''' , __lowercase)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            # Vocode the accumulated mel spectrogram into a waveform.
            __UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            __UpperCamelCase :List[str] = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=__lowercase)
| 43
| 1
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__lowercase = '''base_with_context'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Copy T5X notes-encoder weights into the torch encoder and return it.

    NOTE(review): obfuscated source — both parameters are declared as
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) while the body reads `weights`
    and `model`, and the ``__UpperCamelCase`` targets were presumably
    attribute assignments onto `model`/`lyr` submodules.  Restore from the
    upstream conversion script before use.
    """
    __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
    # Position encodings are presumably frozen (requires_grad=False upstream).
    __UpperCamelCase :Any = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=SCREAMING_SNAKE_CASE )
    for lyr_num, lyr in enumerate(model.encoders ):
        __UpperCamelCase :Any = weights[f"""layers_{lyr_num}"""]
        __UpperCamelCase :List[Any] = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Union[str, Any] = ly_weight['''attention''']
        # Q/K/V/O projection kernels are stored transposed in the T5X checkpoint.
        __UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :int = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        # Gated feed-forward weights (wi_0/wi_1) plus output projection (wo).
        __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __UpperCamelCase :int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    __UpperCamelCase :str = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Copy T5X continuous (context) encoder weights into the torch encoder.

    NOTE(review): same obfuscation damage as the notes-encoder loader above —
    duplicate ``SCREAMING_SNAKE_CASE`` parameters (a SyntaxError) and
    placeholder ``__UpperCamelCase`` assignment targets.  Restore from the
    upstream conversion script before use.
    """
    __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    # Position encodings are presumably frozen (requires_grad=False upstream).
    __UpperCamelCase :Optional[Any] = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=SCREAMING_SNAKE_CASE )
    for lyr_num, lyr in enumerate(model.encoders ):
        __UpperCamelCase :Tuple = weights[f"""layers_{lyr_num}"""]
        __UpperCamelCase :Any = ly_weight['''attention''']
        # Q/K/V/O projection kernels are stored transposed in the T5X checkpoint.
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __UpperCamelCase :Dict = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
        __UpperCamelCase :Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
    __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Copy T5X FiLM-decoder weights (time embedding, self/cross attention,
    FiLM layers, gated MLP) into the torch decoder and return it.

    NOTE(review): same obfuscation damage as the encoder loaders above —
    duplicate ``SCREAMING_SNAKE_CASE`` parameters (a SyntaxError) and
    placeholder ``__UpperCamelCase`` assignment targets.  Restore from the
    upstream conversion script before use.
    """
    __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
    __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    # Position encodings are presumably frozen (requires_grad=False upstream).
    __UpperCamelCase :Any = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Optional[int] = nn.Parameter(
        torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        __UpperCamelCase :List[Any] = weights[f"""layers_{lyr_num}"""]
        __UpperCamelCase :Any = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
        # FiLM conditioning projection for the self-attention sub-block.
        __UpperCamelCase :Optional[int] = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        __UpperCamelCase :str = ly_weight['''self_attention''']
        __UpperCamelCase :int = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        # Cross-attention over the encoder outputs.
        __UpperCamelCase :str = ly_weight['''MultiHeadDotProductAttention_0''']
        __UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __UpperCamelCase :List[str] = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        # FiLM conditioning projection for the MLP sub-block.
        __UpperCamelCase :List[str] = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __UpperCamelCase :str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
    __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Convert a music-spectrogram-diffusion T5X/JAX checkpoint into a
    diffusers ``SpectrogramDiffusionPipeline`` and optionally save it.

    NOTE(review): this file was machine-mangled.  The parameter is evidently
    the parsed CLI namespace, yet the body also reads the module-level
    ``args`` directly, and several calls pass the same mangled name where two
    different values were clearly intended (e.g. gin file path vs. gin
    overrides at ``parse_training_gin_file``).  Confirm against the original
    conversion script before running.
    """
    # Load the raw T5X checkpoint and materialise every leaf as a numpy array.
    __UpperCamelCase :Union[str, Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    __UpperCamelCase :int = jnp.tree_util.tree_map(onp.array , SCREAMING_SNAKE_CASE )
    # Gin overrides enabling classifier-free guidance at inference time.
    __UpperCamelCase :Any = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    __UpperCamelCase :Optional[int] = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    __UpperCamelCase :Any = inference.parse_training_gin_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Tuple = inference.InferenceModel(args.checkpoint_path , SCREAMING_SNAKE_CASE )
    # Scheduler configuration matching the original diffusion setup.
    __UpperCamelCase :List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    # Instantiate the three torch sub-networks, copying dims from the JAX config.
    __UpperCamelCase :List[str] = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    __UpperCamelCase :Tuple = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    __UpperCamelCase :Dict = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    # Port the JAX weights into each torch module.
    __UpperCamelCase :List[str] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Optional[Any] = load_decoder(ta_checkpoint['''target''']['''decoder'''] , SCREAMING_SNAKE_CASE )
    # Pretrained mel-spectrogram -> audio decoder (ONNX).
    __UpperCamelCase :Optional[Any] = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    __UpperCamelCase :List[str] = SpectrogramDiffusionPipeline(
        notes_encoder=SCREAMING_SNAKE_CASE , continuous_encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , melgan=SCREAMING_SNAKE_CASE , )
    if args.save:
        # NOTE(review): ``pipe`` is not bound above -- the pipeline was assigned
        # to a mangled local; this line raises NameError as written.
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # FIX: the original bound the parser to ``__lowercase`` and then used the
    # undefined name ``parser`` (NameError).  Bind it consistently.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
    # NOTE(review): ``type=bool`` makes every non-empty string truthy
    # (``--save False`` still yields True); kept as-is to preserve the CLI.
    parser.add_argument(
        '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
    )
    parser.add_argument(
        '''--checkpoint_path''',
        default=f'{MODEL}/checkpoint_500000',  # NOTE(review): ``MODEL`` is not defined in this file -- confirm upstream
        type=str,
        required=False,
        help='''Path to the original jax model checkpoint.''',
    )
    # FIX: bind the parsed namespace to ``args`` (the conversion routine reads it).
    args = parser.parse_args()
    # FIX: ``main`` does not exist here; the conversion entry point above is
    # (mangled to) ``lowerCamelCase``.
    lowerCamelCase(args)
| 43
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger.  NOTE(review): the obfuscator renamed it to
# ``__lowercase``; the functions below reference the now-undefined ``logger``.
__lowercase = logging.get_logger(__name__)

# Mapping from fairseq parameter-name fragments to the corresponding
# HF Transformers module paths for HuBERT conversion.  A literal ``*`` is
# replaced with the encoder layer index during weight loading.
__lowercase = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
def lowerCamelCase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy one fairseq tensor into the matching HF module parameter.

    Walks ``hf_pointer`` down the dotted ``key`` path, checks the shape, and
    assigns ``value`` to the ``weight_type`` slot (or to the module itself when
    ``weight_type`` is None).

    FIX(review): the mangled original repeated the same parameter name five
    times (a SyntaxError) and assigned every branch to a throwaway local; the
    parameter names and the ``.data`` assignment targets below are restored
    from the names the body itself references.
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    # Write the tensor into the appropriate parameter slot.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase ( fairseq_model , hf_model , is_finetuned ):
    """Copy every tensor of ``fairseq_model``'s state dict into ``hf_model``.

    Conv feature-extractor tensors are routed through ``load_conv_layer``;
    everything else goes through the MAPPING table and ``set_recursively``.
    Unmatched tensors are collected and logged as unused.

    FIX(review): the mangled original repeated one parameter name three times
    (a SyntaxError) and lost several assignment targets (``is_used``,
    ``mapped_key``); restored from the names the body itself references.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment.
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one fairseq conv-feature-extractor tensor into the HF extractor.

    ``full_name`` encodes ``conv_layers.<layer_id>.<type_id>...``; type 0 is
    the conv itself, type 2 the (group/layer) norm.  Anything else is recorded
    as unused.

    FIX(review): the mangled original repeated one parameter name five times
    (a SyntaxError) and dropped the tensor assignment targets; restored from
    the names the body itself references.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # NOTE(review): the failure message indexes ``feature_extractor[layer_id]``
            # (missing ``.conv_layers``) -- kept byte-identical; it only evaluates
            # when the assertion fails.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def lowerCamelCase ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Copy/paste/tweak a fairseq HuBERT checkpoint into the HF design.

    Builds a HubertConfig (optionally from ``config_path``), for fine-tuned
    models derives the CTC vocabulary from ``dict_path`` and saves a
    processor, then loads the fairseq model and ports its weights.

    FIX(review): the mangled original repeated the same parameter name
    (a SyntaxError) and assigned config fields to throwaway locals; names and
    targets restored from the names the body itself references.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            # Layer-normed extractors return an attention mask; group-normed do not.
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # FIX: the original bound the parser to ``__lowercase`` and then used the
    # undefined name ``parser`` (NameError); bind both ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    # FIX: ``convert_hubert_checkpoint`` does not exist in this file; the
    # conversion routine above is (mangled to) ``lowerCamelCase``.
    lowerCamelCase(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 43
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Output container for the pipeline below.

    Mirrors a diffusers ``BaseOutput`` subclass: a single field holding the
    rendered result as a PIL image or a numpy array.
    """
    # rendered images (field name mangled by the obfuscator)
    a__ : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Image-conditioned 3D generation pipeline (Shap-E img2img style):
    encodes the input image with a CLIP vision model, denoises a latent with a
    prior transformer, then renders the latent into view images.

    NOTE(review): this file was machine-mangled.  All five methods below share
    the name ``UpperCamelCase__`` (later defs shadow earlier ones at runtime),
    and several signatures repeat the parameter name ``__lowercase``, which is
    a SyntaxError in real Python.  Comments describe the evident intent.
    """
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[Any]:
        # Positional order is evidently: prior, image_encoder, image_processor,
        # scheduler, renderer (see the register_modules keywords).
        super().__init__()
        self.register_modules(
            prior=__lowercase , image_encoder=__lowercase , image_processor=__lowercase , scheduler=__lowercase , renderer=__lowercase , )
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[Any]:
        # prepare_latents: draw fresh noise when none is supplied, otherwise
        # validate the shape and move to device; always pre-scale by the
        # scheduler's initial noise sigma.
        if latents is None:
            __UpperCamelCase :Any = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            __UpperCamelCase :List[Any] = latents.to(__lowercase)
        __UpperCamelCase :int = latents * scheduler.init_noise_sigma
        return latents
    def UpperCamelCase__ ( self , __lowercase=0) -> Any:
        # Sequential CPU offload of the heavy sub-models via accelerate.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        # NOTE(review): ``gpu_id`` is not a visible binding here (mangled name).
        __UpperCamelCase :Tuple = torch.device(f"""cuda:{gpu_id}""")
        __UpperCamelCase :str = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__lowercase , __lowercase)
    @property
    def UpperCamelCase__ ( self) -> Tuple:
        # Execution device, honouring accelerate hooks when modules are offloaded.
        if self.device != torch.device('''meta''') or not hasattr(self.image_encoder , '''_hf_hook'''):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(__lowercase , '''_hf_hook''')
                and hasattr(module._hf_hook , '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , ) -> str:
        # Encode the conditioning image: normalise the input to a pixel tensor,
        # run the CLIP vision tower, and (for CFG) prepend a zero embedding.
        if isinstance(__lowercase , __lowercase) and isinstance(image[0] , torch.Tensor):
            __UpperCamelCase :Union[str, Any] = torch.cat(__lowercase , axis=0) if image[0].ndim == 4 else torch.stack(__lowercase , axis=0)
        if not isinstance(__lowercase , torch.Tensor):
            __UpperCamelCase :Any = self.image_processor(__lowercase , return_tensors='''pt''').pixel_values[0].unsqueeze(0)
        __UpperCamelCase :Optional[Any] = image.to(dtype=self.image_encoder.dtype , device=__lowercase)
        __UpperCamelCase :Union[str, Any] = self.image_encoder(__lowercase)['''last_hidden_state''']
        __UpperCamelCase :Any = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        __UpperCamelCase :Any = image_embeds.repeat_interleave(__lowercase , dim=0)
        if do_classifier_free_guidance:
            __UpperCamelCase :Dict = torch.zeros_like(__lowercase)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __UpperCamelCase :Any = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(__lowercase)
    def __call__( self , __lowercase , __lowercase = 1 , __lowercase = 25 , __lowercase = None , __lowercase = None , __lowercase = 4.0 , __lowercase = 64 , __lowercase = "pil" , __lowercase = True , ) -> List[Any]:
        # Main entry point: image -> prior denoising loop -> rendered frames.
        if isinstance(__lowercase , PIL.Image.Image):
            __UpperCamelCase :List[Any] = 1
        elif isinstance(__lowercase , torch.Tensor):
            __UpperCamelCase :str = image.shape[0]
        elif isinstance(__lowercase , __lowercase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
            __UpperCamelCase :Dict = len(__lowercase)
        else:
            raise ValueError(
                f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowercase)}""")
        __UpperCamelCase :Tuple = self._execution_device
        __UpperCamelCase :List[Any] = batch_size * num_images_per_prompt
        __UpperCamelCase :List[Any] = guidance_scale > 1.0
        # NOTE(review): no method of this (mangled) class is named _encode_image.
        __UpperCamelCase :str = self._encode_image(__lowercase , __lowercase , __lowercase , __lowercase)
        # prior
        self.scheduler.set_timesteps(__lowercase , device=__lowercase)
        __UpperCamelCase :str = self.scheduler.timesteps
        __UpperCamelCase :str = self.prior.config.num_embeddings
        __UpperCamelCase :Optional[Any] = self.prior.config.embedding_dim
        __UpperCamelCase :List[str] = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowercase , __lowercase , __lowercase , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        __UpperCamelCase :List[Any] = latents.reshape(latents.shape[0] , __lowercase , __lowercase)
        for i, t in enumerate(self.progress_bar(__lowercase)):
            # expand the latents if we are doing classifier free guidance
            __UpperCamelCase :int = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            __UpperCamelCase :Dict = self.scheduler.scale_model_input(__lowercase , __lowercase)
            __UpperCamelCase :List[Any] = self.prior(
                __lowercase , timestep=__lowercase , proj_embedding=__lowercase , ).predicted_image_embedding
            # remove the variance
            __UpperCamelCase , __UpperCamelCase :Dict = noise_pred.split(
                scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            # NOTE(review): comparing a bool against ``is not None`` is always
            # True here -- presumably a plain truthiness test was intended.
            if do_classifier_free_guidance is not None:
                __UpperCamelCase , __UpperCamelCase :Union[str, Any] = noise_pred.chunk(2)
                __UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            __UpperCamelCase :Optional[int] = self.scheduler.step(
                __lowercase , timestep=__lowercase , sample=__lowercase , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=__lowercase)
        __UpperCamelCase :List[Any] = []
        for i, latent in enumerate(__lowercase):
            # NOTE(review): stray debug print inside the render loop.
            print()
            __UpperCamelCase :Any = self.renderer.decode(
                latent[None, :] , __lowercase , size=__lowercase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(__lowercase)
        __UpperCamelCase :List[str] = torch.stack(__lowercase)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""")
        __UpperCamelCase :Optional[int] = images.cpu().numpy()
        if output_type == "pil":
            __UpperCamelCase :Optional[Any] = [self.numpy_to_pil(__lowercase) for image in images]
        # Offload last model to CPU
        if hasattr(self , '''final_offload_hook''') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=__lowercase)
| 43
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# NOTE(review): each constant below was renamed to ``__lowercase`` by the
# obfuscator, so every assignment clobbers the previous one.  Originally these
# were (in order): OUTPUT_SIZE, SCALE_RANGE, FILTER_TINY_SCALE, LABEL_DIR,
# IMG_DIR, OUTPUT_DIR, NUMBER_IMAGES -- confirm against the upstream script.
__lowercase = (720, 1280) # Height, Width
__lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it.
__lowercase = 1 / 100  # minimum kept box size after mosaic
__lowercase = ''''''  # label directory (must be filled in)
__lowercase = ''''''  # image directory (must be filled in)
__lowercase = ''''''  # output directory (must be filled in)
__lowercase = 250  # number of mosaic images to generate
def lowerCamelCase ( ):
    """Generate mosaic-augmented samples: for each output image, stitch four
    random dataset images into a 2x2 mosaic, write the JPEG and a matching
    YOLO-format ``.txt`` annotation file.

    NOTE(review): the obfuscation replaced the module constants with the
    undefined name ``SCREAMING_SNAKE_CASE`` (and ``OUTPUT_DIR`` /
    ``NUMBER_IMAGES`` in the f-strings below are likewise unbound), so this
    function raises NameError as written -- restore the constants before use.
    """
    __UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    for index in range(SCREAMING_SNAKE_CASE ):
        # Pick four distinct source images for the mosaic quadrants.
        __UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __UpperCamelCase :List[Any] = random_chars(32 )
        __UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
        # Convert corner-format boxes back to YOLO (center, width, height).
        __UpperCamelCase :Optional[Any] = []
        for anno in new_annos:
            __UpperCamelCase :int = anno[3] - anno[1]
            __UpperCamelCase :Optional[int] = anno[4] - anno[2]
            __UpperCamelCase :int = anno[1] + width / 2
            __UpperCamelCase :List[str] = anno[2] + height / 2
            __UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(SCREAMING_SNAKE_CASE )
        with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase ( label_dir , img_dir ):
    """Read every YOLO ``.txt`` label file in ``label_dir`` and pair it with
    the like-named ``.jpg`` in ``img_dir``.

    YOLO rows ``cls xc yc w h`` are converted to corner format
    ``[cls, xmin, ymin, xmax, ymax]``.  Files with no boxes are skipped.

    Returns:
        (img_paths, labels): parallel lists of image paths and per-image
        box lists.

    FIX(review): the mangled original repeated the same parameter name twice
    (a SyntaxError) and lost several assignment targets; restored from the
    names the body itself references.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def lowerCamelCase ( all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    """Stitch four images into one 2x2 mosaic and remap their boxes.

    The split point between quadrants is drawn uniformly from ``scale_range``.
    Boxes are rescaled/shifted into the mosaic's normalised coordinates; when
    ``filter_scale`` > 0, boxes smaller than it in either dimension are dropped.

    Returns:
        (output_img, new_anno, first_path): mosaic image, remapped corner-format
        boxes, and the path of the first source image.

    FIX(review): the mangled original repeated the same parameter name six
    times (a SyntaxError), used the non-existent dtype ``np.uinta`` (uint8),
    and lost the quadrant slice-assignment targets; all restored.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def lowerCamelCase ( number_char ):
    """Return a random string of ``number_char`` lowercase letters and digits.

    FIX(review): the mangled original's parameter was ``SCREAMING_SNAKE_CASE``
    while the body referenced the undefined ``number_char`` (NameError); the
    parameter name is restored to the one the body uses.
    NOTE: ``assert`` validation disappears under ``python -O``; kept to
    preserve the original contract (AssertionError for sizes <= 1).
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined anywhere in this (mangled) file --
    # the entry point was renamed by the obfuscator, so running this as a
    # script raises NameError before any work is done.
    main()
    print('''DONE ✅''')
| 43
| 1
|
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Chunking parameters for converting Natural Questions examples into
# fixed-length model inputs.  NOTE(review): the obfuscator collapsed the
# original constant names into ``__lowercase``, so successive bindings
# clobber each other; presumably these were doc stride, max sequence length,
# a seed, the PROCESS_TRAIN env flag, and the category-label mapping.
__lowercase = 2048
__lowercase = 4096
__lowercase = 42
__lowercase = os.environ.pop('''PROCESS_TRAIN''', '''false''')
__lowercase = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def lowerCamelCase ( example ):
    """Extract one normalised answer dict from a Natural Questions example.

    Resolves yes/no annotations first, then falls back from short answers to
    long answers.  Marks samples with ambiguous or empty spans via the
    ``remove_it`` flag so downstream code can drop them.

    Raises:
        ValueError: if any span column is not a list.

    FIX(review): the mangled original's inner ``choose_first`` repeated one
    parameter name (a SyntaxError) and every ``answer[...]`` key assignment
    was turned into a throwaway local; restored from the key names the rest
    of the body reads.
    """
    def choose_first(answer , is_long_answer=False ):
        # Pick the first annotation that actually has a span; long answers
        # are wrapped so every field becomes a one-element list.
        assert isinstance(answer , list )
        if len(answer ) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a['''start_token'''] ) > 0:
                break
        return a
    answer = {'''id''': example['''id''']}
    annotation = example['''annotations''']
    yes_no_answer = annotation['''yes_no_answer''']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer['''category'''] = ['''yes'''] if 1 in yes_no_answer else ['''no''']
        answer['''start_token'''] = []
        answer['''end_token'''] = []
        answer['''start_byte'''] = []
        answer['''end_byte'''] = []
        answer['''text'''] = ['''<cls>''']
    else:
        answer['''category'''] = ['''short''']
        out = choose_first(annotation['''short_answers'''] )
        if len(out['''start_token'''] ) == 0:
            # answer will be long if short is not available
            answer['''category'''] = ['''long''']
            out = choose_first(annotation['''long_answer'''] , is_long_answer=True )
            out['''text'''] = []
        answer.update(out )
    # disregard some samples
    if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
        answer['''remove_it'''] = True
    else:
        answer['''remove_it'''] = False
    cols = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
    if not all(isinstance(answer[k] , list ) for k in cols ):
        raise ValueError('''Issue in ID''' , example['''id'''] )
    return answer
def lowerCamelCase ( example , assertion=False ):
    """Build an HTML-stripped context string and shift the answer span to match.

    Yes/no answers get sentinel -100 token indices; missing answers get a
    "null" record.  Otherwise every HTML token before the span decrements the
    span boundaries so they index into the cleaned context.  With
    ``assertion=True`` the recomputed span text is cross-checked and any
    mismatch is printed.

    FIX(review): the mangled original repeated one parameter name twice
    (a SyntaxError) and lost local assignment targets; restored from the
    names the body itself references.  It still calls ``_get_single_answer``,
    which the obfuscator renamed elsewhere in this file -- confirm the helper
    is importable before running.
    """
    answer = _get_single_answer(example )
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example['''document''']['''tokens''']
        context = []
        for i in range(len(doc['''token'''] ) ):
            if not doc["is_html"][i]:
                context.append(doc['''token'''][i] )
        return {
            "context": " ".join(context ),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ['''start_token''', '''end_token''']
    answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} )  # e.g. [10] == 10
    doc = example['''document''']['''tokens''']
    start_token = answer['''start_token''']
    end_token = answer['''end_token''']
    context = []
    for i in range(len(doc['''token'''] ) ):
        if not doc["is_html"][i]:
            context.append(doc['''token'''][i] )
        else:
            # Each stripped HTML token before the span shifts it one left.
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = ''' '''.join(context[start_token:end_token] )
    # checking above code
    if assertion:
        is_html = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
        old = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
        old = ''' '''.join([old[i] for i in range(len(old ) ) if not is_html[i]] )
        if new != old:
            print('''ID:''' , example['''id'''] )
            print('''New:''' , new , end='''\n''' )
            print('''Old:''' , old , end='''\n\n''' )
    return {
        "context": " ".join(context ),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2_048 , SCREAMING_SNAKE_CASE=4_096 , SCREAMING_SNAKE_CASE=True ):
    '''Tokenize one Natural-Questions example and split it into strided windows.

    Positional args (names obfuscated): the example dict, the tokenizer, then
    doc_stride (default 2048), max_length (default 4096) and an assertion flag
    (default True) that enables decode-and-compare sanity checks.
    Returns {"example_id", "input_ids": list of token windows, "labels":
    {"start_token", "end_token", "category"}} with one label per window.

    NOTE(review): every local is assigned to `__UpperCamelCase` while the code
    reads the intended names (`out`, `answer`, `input_ids`, ...) — the real
    assignment targets must be restored before this can run.
    '''
    __UpperCamelCase :Optional[Any] = get_context_and_ans(SCREAMING_SNAKE_CASE , assertion=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[str] = out['''answer''']
    # later, removing these samples: -1 sentinels mark "no answer" examples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    __UpperCamelCase :Optional[int] = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
    # q_len = index just past the first SEP, i.e. length of the question segment
    __UpperCamelCase :Optional[int] = input_ids.index(tokenizer.sep_token_id ) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        __UpperCamelCase :int = []
        __UpperCamelCase :Any = []
        __UpperCamelCase :Tuple = input_ids[:q_len]
        # window starts advance by (max_length - doc_stride) so windows overlap
        __UpperCamelCase :Any = range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , max_length - doc_stride )
        for i in doc_start_indices:
            __UpperCamelCase :Tuple = i + max_length - q_len
            __UpperCamelCase :Any = input_ids[i:end_index]
            inputs.append(q_indices + slice )
            category.append(answer['''category'''][0] )
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(SCREAMING_SNAKE_CASE ),
                "end_token": [-100] * len(SCREAMING_SNAKE_CASE ),
                "category": category,
            },
        }
    __UpperCamelCase :Tuple = out['''context'''].split()
    __UpperCamelCase :Optional[Any] = splitted_context[answer['''end_token''']]
    # re-tokenize the prefixes to translate word indices into token indices
    __UpperCamelCase :List[str] = len(
        tokenizer(
            ''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=SCREAMING_SNAKE_CASE , ).input_ids )
    __UpperCamelCase :Dict = len(
        tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token: the last answer word may split into several sub-tokens
    __UpperCamelCase :Any = len(tokenizer(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids )
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    __UpperCamelCase :Optional[Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1]  # right & left are inclusive
    __UpperCamelCase :List[Any] = answer['''start_token''']
    __UpperCamelCase :Optional[Any] = answer['''end_token''']
    if assertion:
        # decode the answer span back and compare with the word-level span
        __UpperCamelCase :Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE )
        if answer["span"] != new:
            print('''ISSUE IN TOKENIZATION''' )
            print('''OLD:''' , answer['''span'''] )
            print('''NEW:''' , SCREAMING_SNAKE_CASE , end='''\n\n''' )
    # short enough: a single window holds the whole sequence
    if len(SCREAMING_SNAKE_CASE ) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    __UpperCamelCase :List[str] = input_ids[:q_len]
    __UpperCamelCase :Optional[Any] = range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , max_length - doc_stride )
    __UpperCamelCase :str = []
    __UpperCamelCase :List[str] = []
    __UpperCamelCase :str = []
    __UpperCamelCase :List[Any] = []  # null, yes, no, long, short
    for i in doc_start_indices:
        __UpperCamelCase :str = i + max_length - q_len
        __UpperCamelCase :str = input_ids[i:end_index]
        inputs.append(q_indices + slice )
        assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        # label the window only if the whole answer span falls inside it
        if start_token >= i and end_token <= end_index - 1:
            __UpperCamelCase :Tuple = start_token - i + q_len
            __UpperCamelCase :List[str] = end_token - i + q_len
            answers_category.append(answer['''category'''][0] )  # ["short"] -> "short"
        else:
            __UpperCamelCase :Any = -100
            __UpperCamelCase :Union[str, Any] = -100
            answers_category.append('''null''' )
        __UpperCamelCase :str = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(SCREAMING_SNAKE_CASE )
        answers_end_token.append(SCREAMING_SNAKE_CASE )
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print('''ISSUE in strided for ID:''' , example['''id'''] )
                print('''New:''' , tokenizer.decode(SCREAMING_SNAKE_CASE ) )
                print('''Old:''' , tokenizer.decode(SCREAMING_SNAKE_CASE ) , end='''\n\n''' )
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def lowerCamelCase ( example , tokenizer , doc_stride=2_048 , max_length=4_096 , assertion=False ):
    '''`Dataset.map`-compatible wrapper around `get_strided_contexts_and_ans`.

    BUG FIX: the parameters were all mangled to `SCREAMING_SNAKE_CASE` (an
    illegal duplicated signature) while the body read `example` and the caller
    passes `tokenizer`/`doc_stride`/`max_length`/`assertion` via `fn_kwargs` —
    the real parameter names are restored so both work.
    '''
    example = get_strided_contexts_and_ans(
        example , tokenizer , doc_stride=doc_stride , max_length=max_length , assertion=assertion , )
    return example
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''Append processed samples to a jsonlines file, filtering unusable ones.

    Positional args (names obfuscated): the processed dataset (iterable of
    example dicts with "input_ids" and "labels") and the output file path.
    The file is opened in append mode, so re-running adds rows.
    '''
    with jsonlines.open(SCREAMING_SNAKE_CASE , '''a''' ) as writer:
        for example in tqdm(SCREAMING_SNAKE_CASE , total=len(SCREAMING_SNAKE_CASE ) , desc='''Saving samples ... ''' ):
            __UpperCamelCase :Union[str, Any] = example['''labels''']
            for ids, start, end, cat in zip(
                example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                # randomly drop ~60 % of "null" windows to rebalance the classes
                # (the old comment said 50 %, but the threshold is 0.6)
                if cat == "null" and np.random.rand() < 0.6:
                    continue
                writer.write(
                    {
                        '''input_ids''': ids,
                        '''start_token''': start,
                        '''end_token''': end,
                        '''category''': CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
    # Script entry: tokenize the Natural Questions dataset with a BigBird
    # tokenizer, build strided windows via prepare_inputs, and dump the result
    # to a jsonlines file.
    # NOTE(review): PROCESS_TRAIN, DOC_STRIDE, MAX_LENGTH and SEED are not
    # defined anywhere in the visible file — presumably module-level constants
    # or environment-derived values lost in the obfuscation; confirm upstream.
    from datasets import load_dataset
    from transformers import BigBirdTokenizer
    __lowercase = load_dataset('''natural_questions''')
    __lowercase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
    __lowercase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
    __lowercase = {
        '''tokenizer''': tokenizer,
        '''doc_stride''': DOC_STRIDE,
        '''max_length''': MAX_LENGTH,
        '''assertion''': False,
    }
    __lowercase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    __lowercase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
    print(data)
    np.random.seed(SEED)
    __lowercase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
    save_to_disk(data, file_name=cache_file_name)
| 43
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Configuration class for Wav2Vec2 models (model_type "wav2vec2").

    Stores the feature-extractor convolution stack, transformer encoder,
    SpecAugment, quantizer, CTC, adapter and classification-head
    hyper-parameters.

    NOTE(review): all constructor parameters were mangled to `__lowercase`
    and every attribute assignment targets `__UpperCamelCase` while the rest
    of the class reads the intended attribute names (hidden_size, conv_dim,
    ...) — the real names must be restored before this can run.
    '''
    a__ : Union[str, Any] = """wav2vec2"""
    def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
        super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
        # Core transformer / feature-extractor hyper-parameters.
        __UpperCamelCase :Any = hidden_size
        __UpperCamelCase :int = feat_extract_norm
        __UpperCamelCase :Tuple = feat_extract_activation
        __UpperCamelCase :Union[str, Any] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :int = list(__lowercase)
        __UpperCamelCase :List[Any] = conv_bias
        __UpperCamelCase :Optional[int] = num_conv_pos_embeddings
        __UpperCamelCase :Dict = num_conv_pos_embedding_groups
        __UpperCamelCase :Any = len(self.conv_dim)
        __UpperCamelCase :List[str] = num_hidden_layers
        __UpperCamelCase :int = intermediate_size
        __UpperCamelCase :str = hidden_act
        __UpperCamelCase :Any = num_attention_heads
        __UpperCamelCase :int = hidden_dropout
        __UpperCamelCase :Tuple = attention_dropout
        __UpperCamelCase :List[str] = activation_dropout
        __UpperCamelCase :Optional[Any] = feat_proj_dropout
        __UpperCamelCase :Any = final_dropout
        __UpperCamelCase :Any = layerdrop
        __UpperCamelCase :str = layer_norm_eps
        __UpperCamelCase :Optional[Any] = initializer_range
        __UpperCamelCase :List[str] = vocab_size
        __UpperCamelCase :str = do_stable_layer_norm
        __UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
        # The conv stack description must be consistent across the three lists.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __UpperCamelCase :List[Any] = apply_spec_augment
        __UpperCamelCase :Tuple = mask_time_prob
        __UpperCamelCase :int = mask_time_length
        __UpperCamelCase :Dict = mask_time_min_masks
        __UpperCamelCase :str = mask_feature_prob
        __UpperCamelCase :List[str] = mask_feature_length
        __UpperCamelCase :Union[str, Any] = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        __UpperCamelCase :Optional[Any] = num_codevectors_per_group
        __UpperCamelCase :List[Any] = num_codevector_groups
        __UpperCamelCase :Tuple = contrastive_logits_temperature
        __UpperCamelCase :Optional[int] = feat_quantizer_dropout
        __UpperCamelCase :Optional[int] = num_negatives
        __UpperCamelCase :List[Any] = codevector_dim
        __UpperCamelCase :str = proj_codevector_dim
        __UpperCamelCase :List[str] = diversity_loss_weight
        # ctc loss
        __UpperCamelCase :Tuple = ctc_loss_reduction
        __UpperCamelCase :Tuple = ctc_zero_infinity
        # adapter
        __UpperCamelCase :List[str] = add_adapter
        __UpperCamelCase :Tuple = adapter_kernel_size
        __UpperCamelCase :str = adapter_stride
        __UpperCamelCase :Tuple = num_adapter_layers
        __UpperCamelCase :Tuple = output_hidden_size or hidden_size
        __UpperCamelCase :Optional[Any] = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __UpperCamelCase :Optional[Any] = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        __UpperCamelCase :Optional[int] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :List[Any] = list(__lowercase)
        __UpperCamelCase :str = xvector_output_dim
    @property
    def UpperCamelCase__ ( self) -> List[str]:
        '''Total downsampling factor of the conv feature extractor (product of strides).'''
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 43
| 1
|
import os
from datetime import datetime as dt
from github import Github
__lowercase = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def lowerCamelCase ( ):
    '''Close or warn stale issues in the huggingface/accelerate repository.

    Reads the GitHub token from the GITHUB_TOKEN environment variable. For each
    open issue without an exempt label that has been open for at least 30 days:
    - close it if the newest comment is the stale bot's and there has been no
      activity for more than 7 days;
    - otherwise post the stale-warning comment if inactive for more than 23 days.
    '''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        # Sort comments newest-first.
        # BUG FIX: the lambda previously referenced an undefined name `i` as its
        # body and `reverse=` was passed an undefined global, so any issue with
        # comments raised NameError during the sort.
        comments = sorted(issue.get_comments() , key=lambda comment : comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='''closed''' )
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Add stale comment
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 43
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Image processor: resize to a multiple of `size_divisor`, then rescale to [0, 1].

    NOTE(review): attribute/local assignments target `__UpperCamelCase` while
    the code reads the intended names (do_resize, size_divisor, images, ...) —
    the real assignment targets must be restored before this can run.
    '''
    a__ : Optional[Any] = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
        # Parameters (obfuscated, in order): do_resize, size_divisor, resample, do_rescale.
        __UpperCamelCase :Optional[int] = do_resize
        __UpperCamelCase :Any = do_rescale
        __UpperCamelCase :str = size_divisor
        __UpperCamelCase :Dict = resample
        super().__init__(**__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        '''Resize `image` so height and width are floored to multiples of `size_divisor`.'''
        __UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
        # Rounds the height and width down to the closest multiple of size_divisor
        __UpperCamelCase :List[Any] = height // size_divisor * size_divisor
        __UpperCamelCase :List[str] = width // size_divisor * size_divisor
        __UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
        return image
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        '''Rescale pixel values by `scale` (e.g. 1/255 to map uint8 to [0, 1]).'''
        return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
        '''Preprocess a batch: optional resize and rescale, then channel-dimension conversion.'''
        # Per-call arguments override the defaults stored on the instance.
        __UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
        __UpperCamelCase :List[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        __UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
        if not valid_images(__lowercase):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        __UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
        if do_resize:
            __UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
        if do_rescale:
            __UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
        __UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
        __UpperCamelCase :int = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase)
| 43
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Configuration class for LiLT ("lilt") models.

    Stores the standard transformer hyper-parameters plus LiLT-specific ones.
    NOTE(review): attribute assignments target `__UpperCamelCase` while reading
    the intended parameter names; also `max_ad_position_embeddings` looks like a
    garbled `max_2d_position_embeddings` (the 2-D layout embedding size) —
    confirm against the upstream config before relying on the attribute name.
    '''
    a__ : Tuple = """lilt"""
    def __init__( self , __lowercase=30_522 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=2 , __lowercase=0.02 , __lowercase=1E-1_2 , __lowercase=0 , __lowercase="absolute" , __lowercase=None , __lowercase=4 , __lowercase=1_024 , **__lowercase , ) -> int:
        super().__init__(pad_token_id=__lowercase , **__lowercase)
        # Standard BERT-style hyper-parameters.
        __UpperCamelCase :int = vocab_size
        __UpperCamelCase :int = hidden_size
        __UpperCamelCase :Tuple = num_hidden_layers
        __UpperCamelCase :Optional[Any] = num_attention_heads
        __UpperCamelCase :Tuple = hidden_act
        __UpperCamelCase :Any = intermediate_size
        __UpperCamelCase :int = hidden_dropout_prob
        __UpperCamelCase :str = attention_probs_dropout_prob
        __UpperCamelCase :Tuple = max_position_embeddings
        __UpperCamelCase :Tuple = type_vocab_size
        __UpperCamelCase :Any = initializer_range
        __UpperCamelCase :List[str] = layer_norm_eps
        __UpperCamelCase :str = position_embedding_type
        __UpperCamelCase :List[Any] = classifier_dropout
        # LiLT-specific parameters.
        __UpperCamelCase :Optional[Any] = channel_shrink_ratio
        __UpperCamelCase :int = max_ad_position_embeddings
| 43
|
from __future__ import annotations
from PIL import Image
# Define glider example
__lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase ( cells ):
    '''Compute the next generation of Conway's Game of Life.

    `cells` is a 2-D list of 0/1 integers. Returns a new grid of the same
    shape; the input grid is not modified.

    BUG FIX: locals were assigned to the mangled name `__UpperCamelCase` while
    the code read `next_generation`, `neighbour_count`, etc., raising
    NameError. The eight hand-written border checks are also replaced by a
    bounds-checked 3x3 scan with identical results.
    '''
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count live neighbours with a bounded 3x3 scan around (i, j).
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue  # skip the cell itself
                    ni, nj = i + di, j + dj
                    if 0 <= ni < len(cells) and 0 <= nj < len(cells[ni]):
                        neighbour_count += cells[ni][nj]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die; all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''Run the Game of Life for N frames and return one PIL image per generation.

    Positional args (names obfuscated): the starting cell grid and the frame
    count. NOTE(review): locals are assigned to `__UpperCamelCase` but read
    under their intended names (`images`, `img`, `cells`, ...) — restore the
    real targets before use.
    '''
    __UpperCamelCase :Optional[Any] = []
    for _ in range(SCREAMING_SNAKE_CASE ):
        # Create output image
        __UpperCamelCase :Dict = Image.new('''RGB''' , (len(cells[0] ), len(SCREAMING_SNAKE_CASE )) )
        __UpperCamelCase :Any = img.load()
        # Save cells to image
        for x in range(len(SCREAMING_SNAKE_CASE ) ):
            for y in range(len(cells[0] ) ):
                # 255 (white) for a dead cell, 0 (black) for a live one.
                # NOTE(review): x iterates rows while the grid is read as
                # cells[y][x] — this is only safe for square grids; confirm.
                __UpperCamelCase :Optional[Any] = 255 - cells[y][x] * 255
                __UpperCamelCase :int = (colour, colour, colour)
        # Save image
        images.append(SCREAMING_SNAKE_CASE )
        __UpperCamelCase :Optional[int] = new_generation(SCREAMING_SNAKE_CASE )
    return images
if __name__ == "__main__":
__lowercase = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 43
| 1
|
import requests
__lowercase = '''YOUR API KEY'''
def lowerCamelCase ( query , api_key = giphy_api_key ):
    '''Search Giphy for *query* and return the URL of every matching GIF.

    Performs a network request against the Giphy search API; `api_key`
    defaults to the module-level `giphy_api_key` constant.

    BUG FIX: parameters and locals were mangled (both parameters shared the
    name `SCREAMING_SNAKE_CASE`, and locals were assigned to
    `__UpperCamelCase` while the body read `query`/`formatted_query`/`gifs`),
    so the function could not run; the intended names are restored.
    '''
    formatted_query = '''+'''.join(query.split() )
    url = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 43
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCamelCase ( key ):
    '''Replace every "<word>.<digits>" segment in *key* with "<word>_<digits>".

    Used to turn PyTorch-style parameter names (e.g. "layers.0.weight") into
    Flax-style ones ("layers_0.weight"). Keys without such segments are
    returned unchanged.

    BUG FIX: locals were assigned to the mangled `__UpperCamelCase` while the
    body read `pats`/`key`, raising NameError; the real bindings are restored.
    '''
    regex = R'''\w+[.]\d+'''
    pats = re.findall(regex , key )
    for pat in pats:
        # e.g. "layers.0" -> "layers_0"
        key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
    return key
def lowerCamelCase ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    '''Rename a PyTorch parameter key tuple to its Flax equivalent, reshaping if needed.

    Args:
        pt_tuple_key: tuple of key path components (e.g. ("dense", "weight")).
        pt_tensor: the parameter tensor (numpy-compatible: .ndim/.T/.transpose).
        random_flax_state_dict: flattened random Flax params, used to decide
            which rename applies for this key.

    Returns the (renamed_key_tuple, tensor) pair.

    BUG FIX: the signature had three parameters all mangled to the same name
    (illegal Python) and locals were assigned to `__UpperCamelCase` while the
    body read the intended names; the real names are restored. All call sites
    pass the three arguments positionally, so the interface is compatible.
    '''
    # layer norm: PyTorch "bias" -> Flax "scale" when the Flax side only has a scale
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=42 ):
    '''Convert a PyTorch state dict into a Flax-compatible nested parameter dict.

    Positional args (names obfuscated): the PyTorch state dict, the Flax model
    (whose randomly-initialized params provide the reference key/shape layout),
    and a PRNG seed (default 42).
    NOTE(review): locals are assigned to `__UpperCamelCase` but read under the
    intended names (`pt_state_dict`, `random_flax_state_dict`, ...) — restore
    the real targets before running.
    '''
    # Step 1: convert tensors to plain numpy arrays
    __UpperCamelCase :Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    __UpperCamelCase :str = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE ) )
    __UpperCamelCase :int = flatten_dict(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        __UpperCamelCase :List[Any] = rename_key(SCREAMING_SNAKE_CASE )
        __UpperCamelCase :List[Any] = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        __UpperCamelCase , __UpperCamelCase :Any = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        __UpperCamelCase :str = jnp.asarray(SCREAMING_SNAKE_CASE )
    return unflatten_dict(SCREAMING_SNAKE_CASE )
| 43
| 1
|
from timeit import timeit
def lowerCamelCase ( number ):
    '''Count the set bits of *number* using Brian Kernighan's algorithm.

    Each iteration of `number &= number - 1` clears the lowest set bit, so the
    loop runs once per set bit. Raises ValueError for negative input.

    BUG FIX: the accumulator was assigned to the mangled `__UpperCamelCase`
    while the body read `result`, raising NameError; the binding is restored.
    '''
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        result += 1
    return result
def lowerCamelCase ( number ):
    '''Count the set bits of *number* by testing the low bit and shifting right.

    Raises ValueError for negative input.

    BUG FIX: the accumulator was assigned to the mangled `__UpperCamelCase`
    while the body read `result`, raising NameError; the binding is restored.
    '''
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        if number % 2 == 1:  # low bit set
            result += 1
        number >>= 1
    return result
def lowerCamelCase ( ):
    '''Benchmark the two popcount implementations with timeit for a few inputs.

    NOTE(review): the setup string is assigned to `__UpperCamelCase` but passed
    as `setup=SCREAMING_SNAKE_CASE`, and the timing results are read as
    `timing` — the real local names must be restored before running. The setup
    imports this module as `z` so timeit can see the two functions.
    '''
    def do_benchmark(SCREAMING_SNAKE_CASE ) -> None:
        # setup string run inside timeit's namespace
        __UpperCamelCase :List[str] = '''import __main__ as z'''
        print(f"""Benchmark when {number = }:""" )
        print(f"""{get_set_bits_count_using_modulo_operator(SCREAMING_SNAKE_CASE ) = }""" )
        __UpperCamelCase :Optional[int] = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=SCREAMING_SNAKE_CASE )
        print(f"""timeit() runs in {timing} seconds""" )
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(SCREAMING_SNAKE_CASE ) = }""" )
        __UpperCamelCase :Union[str, Any] = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=SCREAMING_SNAKE_CASE , )
        print(f"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(SCREAMING_SNAKE_CASE )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''Convert a TensorFlow ALBERT checkpoint into a PyTorch state-dict file.

    Positional args (names obfuscated): tf_checkpoint_path, albert_config_file
    and pytorch_dump_path — matching the CLI flags defined in __main__ below.
    NOTE(review): locals are assigned to `__UpperCamelCase` but read as
    `config`/`model` — restore the real targets before running.
    '''
    __UpperCamelCase :List[Any] = AlbertConfig.from_json_file(SCREAMING_SNAKE_CASE )
    print(f"""Building PyTorch model from configuration: {config}""" )
    __UpperCamelCase :List[str] = AlbertForPreTraining(SCREAMING_SNAKE_CASE )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43
|
import math
import qiskit
def lowerCamelCase ( input_1 = 1 , input_2 = 1 , carry_in = 1 ):
    '''Simulate a quantum full adder and return the measurement counts.

    Each input may be 0, 1 or 2, where 2 places the qubit in superposition via
    a Hadamard gate. Raises TypeError for string inputs and ValueError for
    negative, non-integer-valued or >2 inputs. Runs 1000 shots on the Aer
    simulator and returns the counts dict over the (sum, carry-out) qubits.

    BUG FIX: the mangled source had duplicated parameter names, locals assigned
    to `__UpperCamelCase` while read as `qr`/`cr`/`entry`/`quantum_circuit`,
    and degenerate `isinstance(x, x)` checks; coherent names and explicit
    string-type checks are restored.
    '''
    # Reject strings explicitly before any numeric comparison.
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1 ) != input_1 )
        or (math.floor(input_2 ) != input_2 )
        or (math.floor(carry_in ) != carry_in )
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 43
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration class for a Deformable DETR model.

    Follows the ``PretrainedConfig`` pattern: every constructor argument is
    stored as an attribute, and ``attribute_map`` translates the generic
    names (``hidden_size`` / ``num_attention_heads``) onto the model-specific
    ones (``d_model`` / ``encoder_attention_heads``).

    NOTE(review): local and parameter names in this file appear
    machine-mangled (values assigned to ``__UpperCamelCase`` placeholders but
    read back under their original names, and every parameter collapsed to
    ``__lowercase``); verify against the upstream configuration file.
    """
    # Model-type identifier used by the auto-config machinery.
    a__ : List[str] = """deformable_detr"""
    # Generic config attribute name -> model-specific attribute name.
    a__ : Union[str, Any] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self , __lowercase=True , __lowercase=None , __lowercase=3 , __lowercase=300 , __lowercase=1_024 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=256 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=True , __lowercase=False , __lowercase="sine" , __lowercase="resnet50" , __lowercase=True , __lowercase=False , __lowercase=4 , __lowercase=4 , __lowercase=4 , __lowercase=False , __lowercase=300 , __lowercase=False , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=1 , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=0.1 , __lowercase=0.25 , __lowercase=False , **__lowercase , ) -> int:
        # A custom backbone config and a timm backbone are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                # Fall back to a default ResNet backbone exposing stage4.
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                __UpperCamelCase :str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(__lowercase , __lowercase):
                # A plain dict was passed: resolve the nested config class by
                # its model_type and instantiate it from the dict.
                __UpperCamelCase :str = backbone_config.get('''model_type''')
                __UpperCamelCase :Tuple = CONFIG_MAPPING[backbone_model_type]
                __UpperCamelCase :Any = config_class.from_dict(__lowercase)
        # Plain attribute storage for every constructor argument.
        __UpperCamelCase :int = use_timm_backbone
        __UpperCamelCase :Dict = backbone_config
        __UpperCamelCase :Any = num_channels
        __UpperCamelCase :Optional[int] = num_queries
        __UpperCamelCase :Any = max_position_embeddings
        __UpperCamelCase :str = d_model
        __UpperCamelCase :Tuple = encoder_ffn_dim
        __UpperCamelCase :Union[str, Any] = encoder_layers
        __UpperCamelCase :List[Any] = encoder_attention_heads
        __UpperCamelCase :Any = decoder_ffn_dim
        __UpperCamelCase :List[str] = decoder_layers
        __UpperCamelCase :int = decoder_attention_heads
        __UpperCamelCase :str = dropout
        __UpperCamelCase :Any = attention_dropout
        __UpperCamelCase :int = activation_dropout
        __UpperCamelCase :List[Any] = activation_function
        __UpperCamelCase :List[Any] = init_std
        __UpperCamelCase :List[Any] = init_xavier_std
        __UpperCamelCase :int = encoder_layerdrop
        __UpperCamelCase :str = auxiliary_loss
        __UpperCamelCase :Optional[Any] = position_embedding_type
        __UpperCamelCase :Union[str, Any] = backbone
        __UpperCamelCase :Any = use_pretrained_backbone
        __UpperCamelCase :str = dilation
        # deformable attributes
        __UpperCamelCase :Optional[Any] = num_feature_levels
        __UpperCamelCase :str = encoder_n_points
        __UpperCamelCase :int = decoder_n_points
        __UpperCamelCase :Union[str, Any] = two_stage
        __UpperCamelCase :Optional[Any] = two_stage_num_proposals
        __UpperCamelCase :Dict = with_box_refine
        # Two-stage decoding requires box refinement.
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        __UpperCamelCase :Optional[int] = class_cost
        __UpperCamelCase :List[Any] = bbox_cost
        __UpperCamelCase :str = giou_cost
        # Loss coefficients
        __UpperCamelCase :Tuple = mask_loss_coefficient
        __UpperCamelCase :Tuple = dice_loss_coefficient
        __UpperCamelCase :int = bbox_loss_coefficient
        __UpperCamelCase :Any = giou_loss_coefficient
        __UpperCamelCase :Dict = eos_coefficient
        __UpperCamelCase :Optional[Any] = focal_alpha
        __UpperCamelCase :Optional[Any] = disable_custom_kernels
        super().__init__(is_encoder_decoder=__lowercase , **__lowercase)

    @property
    def UpperCamelCase__ ( self) -> int:
        # Alias for num_attention_heads (see attribute_map).
        return self.encoder_attention_heads

    @property
    def UpperCamelCase__ ( self) -> int:
        # Alias for hidden_size (see attribute_map).
        return self.d_model

    def UpperCamelCase__ ( self) -> List[Any]:
        # Serialize this config to a plain dict, expanding the nested
        # backbone config and recording the model type.
        __UpperCamelCase :Dict = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            __UpperCamelCase :Tuple = self.backbone_config.to_dict()
        __UpperCamelCase :List[Any] = self.__class__.model_type
        return output
| 43
|
import random
def lowerCamelCase(a, left_index, right_index):
    """Lomuto partition of ``a[left_index:right_index]`` around ``a[left_index]``.

    Elements smaller than the pivot are moved to the front of the slice, the
    pivot is placed at its final sorted position, and that index is returned.
    ``right_index`` is exclusive.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            # Bug fix: the original assigned the swapped pair to throwaway
            # temporaries, so the list was never actually mutated.
            a[i], a[j] = a[j], a[i]
            i += 1
    # Move the pivot from the left bound into its final position.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def lowerCamelCase(a, left, right):
    """Sort ``a[left:right]`` in place using quicksort with a random pivot.

    ``right`` is exclusive.  Bug fixes versus the original: the pivot swap
    was discarded into throwaway temporaries (the list was never mutated) and
    the helper/recursive calls referenced names that do not exist in this
    module, so the function is now self-contained.
    """

    def _partition(lo, hi):
        # Lomuto partition around a[lo]; return the pivot's final index.
        pivot = a[lo]
        i = lo + 1
        for j in range(lo + 1, hi):
            if a[j] < pivot:
                a[i], a[j] = a[j], a[i]
                i += 1
        a[lo], a[i - 1] = a[i - 1], a[lo]
        return i - 1

    if left < right:
        # Choose a random pivot and move it to the left bound; randomization
        # gives expected O(n log n) time regardless of input order.
        k = random.randint(left, right - 1)
        a[k], a[left] = a[left], a[k]
        p = _partition(left, right)
        lowerCamelCase(a, left, p)  # recurse on elements smaller than the pivot
        lowerCamelCase(a, p + 1, right)  # recurse on elements larger than the pivot
def lowerCamelCase():
    """Read comma-separated integers from stdin, sort them, and print them."""
    raw = input('''Enter numbers separated by a comma:\n''').strip()
    numbers = list(map(int, raw.split(''',''')))
    quick_sort_random(numbers, 0, len(numbers))
    print(numbers)
if __name__ == "__main__":
    # Script entry point: run the interactive sorting demo.
    main()
| 43
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Unit tests for ``UniPCMultistepScheduler``.

    Built on the shared scheduler test harness (``SchedulerCommonTest``),
    which supplies helpers such as ``dummy_sample``, ``dummy_model``,
    ``check_over_configs`` and ``check_over_forward``.

    NOTE(review): local names in this file appear machine-mangled — results
    are assigned to ``__UpperCamelCase`` placeholders but later read under
    their original names; verify against the upstream test file.
    """
    # Scheduler class(es) exercised by the common test machinery.
    a__ : Union[str, Any] = (UniPCMultistepScheduler,)
    # Default forward kwargs: 25 inference steps unless a test overrides it.
    a__ : int = (("""num_inference_steps""", 2_5),)

    def UpperCamelCase__ ( self , **__lowercase) -> Union[str, Any]:
        # Build the baseline scheduler config; keyword overrides are merged in.
        __UpperCamelCase :Dict = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**__lowercase)
        return config

    def UpperCamelCase__ ( self , __lowercase=0 , **__lowercase) -> Union[str, Any]:
        # Round-trip test: save a scheduler config to disk, reload it, and
        # check both schedulers step identically from the same residual history.
        __UpperCamelCase :Optional[Any] = dict(self.forward_default_kwargs)
        __UpperCamelCase :List[Any] = kwargs.pop('''num_inference_steps''' , __lowercase)
        __UpperCamelCase :Optional[int] = self.dummy_sample
        __UpperCamelCase :Any = 0.1 * sample
        __UpperCamelCase :Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            __UpperCamelCase :Any = self.get_scheduler_config(**__lowercase)
            __UpperCamelCase :Union[str, Any] = scheduler_class(**__lowercase)
            scheduler.set_timesteps(__lowercase)
            # copy over dummy past residuals
            __UpperCamelCase :Any = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowercase)
                __UpperCamelCase :Any = scheduler_class.from_pretrained(__lowercase)
                new_scheduler.set_timesteps(__lowercase)
                # copy over dummy past residuals
                __UpperCamelCase :Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
            __UpperCamelCase , __UpperCamelCase :int = sample, sample
            # Step both schedulers over a small window of timesteps and
            # require numerically identical outputs.
            for t in range(__lowercase , time_step + scheduler.config.solver_order + 1):
                __UpperCamelCase :Tuple = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase).prev_sample
                __UpperCamelCase :Union[str, Any] = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def UpperCamelCase__ ( self , __lowercase=0 , **__lowercase) -> Optional[Any]:
        # Same save/reload round-trip as above, but the residual history is
        # copied only after timesteps have been set on each scheduler.
        __UpperCamelCase :Tuple = dict(self.forward_default_kwargs)
        __UpperCamelCase :Optional[Any] = kwargs.pop('''num_inference_steps''' , __lowercase)
        __UpperCamelCase :Tuple = self.dummy_sample
        __UpperCamelCase :Tuple = 0.1 * sample
        __UpperCamelCase :Any = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            __UpperCamelCase :List[Any] = self.get_scheduler_config()
            __UpperCamelCase :Optional[Any] = scheduler_class(**__lowercase)
            scheduler.set_timesteps(__lowercase)
            # copy over dummy past residuals (must be after setting timesteps)
            __UpperCamelCase :List[str] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowercase)
                __UpperCamelCase :Any = scheduler_class.from_pretrained(__lowercase)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowercase)
                # copy over dummy past residual (must be after setting timesteps)
                __UpperCamelCase :List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
            __UpperCamelCase :Optional[int] = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase).prev_sample
            __UpperCamelCase :List[Any] = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def UpperCamelCase__ ( self , __lowercase=None , **__lowercase) -> int:
        # Run a full 10-step denoising loop with the dummy model and return
        # the final sample.  NOTE(review): the scheduler built inside the
        # ``if scheduler is None`` guard is unconditionally rebuilt just
        # below, which discards any scheduler the caller passed in — this
        # looks like mangling of an if/else; verify against upstream.
        if scheduler is None:
            __UpperCamelCase :Union[str, Any] = self.scheduler_classes[0]
            __UpperCamelCase :Optional[int] = self.get_scheduler_config(**__lowercase)
            __UpperCamelCase :Union[str, Any] = scheduler_class(**__lowercase)
        __UpperCamelCase :Union[str, Any] = self.scheduler_classes[0]
        __UpperCamelCase :Optional[int] = self.get_scheduler_config(**__lowercase)
        __UpperCamelCase :Tuple = scheduler_class(**__lowercase)
        __UpperCamelCase :Union[str, Any] = 10
        __UpperCamelCase :Dict = self.dummy_model()
        __UpperCamelCase :Optional[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(__lowercase)
        for i, t in enumerate(scheduler.timesteps):
            __UpperCamelCase :List[str] = model(__lowercase , __lowercase)
            __UpperCamelCase :int = scheduler.step(__lowercase , __lowercase , __lowercase).prev_sample
        return sample

    def UpperCamelCase__ ( self) -> Any:
        # Step the scheduler at two consecutive timesteps and check both
        # outputs keep the input sample's shape.
        __UpperCamelCase :Any = dict(self.forward_default_kwargs)
        __UpperCamelCase :Tuple = kwargs.pop('''num_inference_steps''' , __lowercase)
        for scheduler_class in self.scheduler_classes:
            __UpperCamelCase :Dict = self.get_scheduler_config()
            __UpperCamelCase :Tuple = scheduler_class(**__lowercase)
            __UpperCamelCase :List[str] = self.dummy_sample
            __UpperCamelCase :Any = 0.1 * sample
            if num_inference_steps is not None and hasattr(__lowercase , '''set_timesteps'''):
                scheduler.set_timesteps(__lowercase)
            elif num_inference_steps is not None and not hasattr(__lowercase , '''set_timesteps'''):
                __UpperCamelCase :str = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            __UpperCamelCase :Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
            __UpperCamelCase :Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
            __UpperCamelCase :Union[str, Any] = scheduler.timesteps[5]
            __UpperCamelCase :Any = scheduler.timesteps[6]
            __UpperCamelCase :Optional[Any] = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase).prev_sample
            __UpperCamelCase :List[Any] = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def UpperCamelCase__ ( self) -> Optional[Any]:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        __UpperCamelCase :Any = UniPCMultistepScheduler(**self.get_scheduler_config())
        __UpperCamelCase :Tuple = self.full_loop(scheduler=__lowercase)
        __UpperCamelCase :List[Any] = torch.mean(torch.abs(__lowercase))
        assert abs(result_mean.item() - 0.24_64) < 1E-3
        # Round-trip the config through the sibling scheduler classes and
        # back to UniPC; the rebuilt scheduler must reproduce the same mean.
        __UpperCamelCase :Tuple = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        __UpperCamelCase :Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config)
        __UpperCamelCase :Dict = DPMSolverMultistepScheduler.from_config(scheduler.config)
        __UpperCamelCase :Tuple = UniPCMultistepScheduler.from_config(scheduler.config)
        __UpperCamelCase :List[Any] = self.full_loop(scheduler=__lowercase)
        __UpperCamelCase :List[str] = torch.mean(torch.abs(__lowercase))
        assert abs(result_mean.item() - 0.24_64) < 1E-3

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Exercise a range of training-timestep counts.
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=__lowercase)

    def UpperCamelCase__ ( self) -> List[str]:
        # Sweep thresholding settings across solver orders, solver types,
        # sample_max_value thresholds and prediction types.
        self.check_over_configs(thresholding=__lowercase)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__lowercase , prediction_type=__lowercase , sample_max_value=__lowercase , solver_order=__lowercase , solver_type=__lowercase , )

    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Both supported prediction types must pass the common config checks.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowercase)

    def UpperCamelCase__ ( self) -> int:
        # For every solver type/order/prediction type combination, run the
        # config checks plus a full loop, and require finite samples.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=__lowercase , solver_type=__lowercase , prediction_type=__lowercase , )
                    __UpperCamelCase :Dict = self.full_loop(
                        solver_order=__lowercase , solver_type=__lowercase , prediction_type=__lowercase , )
                    assert not torch.isnan(__lowercase).any(), "Samples have nan numbers"

    def UpperCamelCase__ ( self) -> Optional[Any]:
        # lower_order_final enabled and disabled must both pass.
        self.check_over_configs(lower_order_final=__lowercase)
        self.check_over_configs(lower_order_final=__lowercase)

    def UpperCamelCase__ ( self) -> Optional[int]:
        # Exercise a range of inference-step counts at timestep 0.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=__lowercase , time_step=0)

    def UpperCamelCase__ ( self) -> List[Any]:
        # Regression check on the default full loop's mean absolute value.
        __UpperCamelCase :List[Any] = self.full_loop()
        __UpperCamelCase :Dict = torch.mean(torch.abs(__lowercase))
        assert abs(result_mean.item() - 0.24_64) < 1E-3

    def UpperCamelCase__ ( self) -> List[str]:
        # Regression check for the v-prediction parameterization.
        __UpperCamelCase :str = self.full_loop(prediction_type='''v_prediction''')
        __UpperCamelCase :Optional[Any] = torch.mean(torch.abs(__lowercase))
        assert abs(result_mean.item() - 0.10_14) < 1E-3

    def UpperCamelCase__ ( self) -> Tuple:
        # Half-precision loop: the sample dtype must be preserved end-to-end.
        __UpperCamelCase :str = self.scheduler_classes[0]
        __UpperCamelCase :List[Any] = self.get_scheduler_config(thresholding=__lowercase , dynamic_thresholding_ratio=0)
        __UpperCamelCase :Optional[Any] = scheduler_class(**__lowercase)
        __UpperCamelCase :Optional[int] = 10
        __UpperCamelCase :Tuple = self.dummy_model()
        __UpperCamelCase :Optional[Any] = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__lowercase)
        for i, t in enumerate(scheduler.timesteps):
            __UpperCamelCase :Tuple = model(__lowercase , __lowercase)
            __UpperCamelCase :str = scheduler.step(__lowercase , __lowercase , __lowercase).prev_sample
        assert sample.dtype == torch.floataa

    def UpperCamelCase__ ( self , **__lowercase) -> Any:
        # Setting timesteps to the full training schedule must yield exactly
        # num_inference_steps unique timesteps.
        for scheduler_class in self.scheduler_classes:
            __UpperCamelCase :Tuple = self.get_scheduler_config(**__lowercase)
            __UpperCamelCase :List[Any] = scheduler_class(**__lowercase)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 43
|
def lowerCamelCase(numerator=1, digit=1_000):
    """Project Euler 26: find the denominator with the longest recurring cycle.

    Searches ``d`` in ``[numerator, digit]`` and returns the ``d`` for which
    ``numerator / d`` has the longest chain of distinct long-division
    remainders before one repeats (the length of the recurring decimal cycle).

    Fixes versus the original: the parameter names were duplicated (a
    SyntaxError), and the inner loop kept spinning for up to ``digit``
    iterations after the repeat was already detected; we now track remainders
    in a set and stop at the first repetition, which is O(1) per membership
    test instead of O(cycle length).

    >>> lowerCamelCase(1, 10)
    7
    """
    the_digit = 1
    longest_cycle = 0
    for divide_by_number in range(numerator, digit + 1):
        seen_remainders: set[int] = set()
        remainder = numerator
        # Long division: iterate remainder -> remainder * 10 % d until a
        # remainder repeats; the count of distinct remainders is the cycle
        # length (terminating decimals reach the fixed point 0 quickly).
        while remainder not in seen_remainders:
            seen_remainders.add(remainder)
            remainder = remainder * 10 % divide_by_number
        # Strict '<' keeps the first (smallest) denominator on ties, matching
        # the original behavior.
        if longest_cycle < len(seen_remainders):
            longest_cycle = len(seen_remainders)
            the_digit = divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 43
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table for the Perceiver package: submodule name -> exported
# symbols.  NOTE(review): this block appears machine-mangled — each optional
# export list below is bound to the same `__lowercase` name (overwriting the
# previous value instead of extending `_import_structure`), and the final
# `_LazyModule` is assigned to `__lowercase` rather than
# `sys.modules[__name__]`; verify against the upstream `__init__.py`.
__lowercase = {
    '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
    '''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
# Vision-dependent exports: feature extractor and image processor.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ['''PerceiverFeatureExtractor''']
    __lowercase = ['''PerceiverImageProcessor''']
# Torch-dependent exports: the modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PerceiverForImageClassificationConvProcessing''',
        '''PerceiverForImageClassificationFourier''',
        '''PerceiverForImageClassificationLearned''',
        '''PerceiverForMaskedLM''',
        '''PerceiverForMultimodalAutoencoding''',
        '''PerceiverForOpticalFlow''',
        '''PerceiverForSequenceClassification''',
        '''PerceiverLayer''',
        '''PerceiverModel''',
        '''PerceiverPreTrainedModel''',
    ]
# For static type checkers, import everything eagerly so symbols resolve.
if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy loader so the heavy
    # optional dependencies are only imported on first attribute access.
    import sys
    __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43
|
import argparse
import json
from tqdm import tqdm
def lowerCamelCase():
    """Parse raw DPR training data into two files.

    Reads the JSON at ``--src_path`` and writes one question per line to
    ``--evaluation_set`` and the tab-joined titles of each record's positive
    contexts to ``--gold_data_path``.

    Fix versus the original: several call sites referenced the undefined name
    ``SCREAMING_SNAKE_CASE`` (``type=``, ``json.load``, ``tqdm``, ``join``),
    which would raise ``NameError`` at runtime; the intended arguments are
    restored below.
    """
    __UpperCamelCase :Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    __UpperCamelCase :str = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        # Load all DPR records at once, then stream them through tqdm.
        __UpperCamelCase :List[str] = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            __UpperCamelCase :List[str] = dpr_record['''question''']
            __UpperCamelCase :Tuple = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(titles) + '''\n''' )
if __name__ == "__main__":
    # Script entry point: parse the DPR data per the command-line arguments.
    main()
| 43
| 1
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :str = 9, 14 # noqa: F841
__UpperCamelCase :Dict = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__UpperCamelCase :Tuple = defaultdict(SCREAMING_SNAKE_CASE )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__UpperCamelCase :Any = mst(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__UpperCamelCase :Tuple = tuple(answer[:2] )
__UpperCamelCase :List[Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 43
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowercase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowercase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowercase = 0.4
# NOTE(review): the three constants above are all bound to the same mangled
# name `__lowercase`, while the rest of the file reads them as N_POPULATION,
# N_SELECTED and MUTATION_PROBABILITY — verify against the original module.
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowerCamelCase(item, main_target):
    """Score ``item`` against ``main_target``.

    Fitness is the number of positions where the genes match; returns the
    pair ``(item, score)`` with the score as a float.  Assumes ``item`` is no
    longer than ``main_target``.

    Fix versus the original: the two parameters shared one name, which is a
    Python SyntaxError.
    """
    __UpperCamelCase :Any = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def lowerCamelCase(parent_a, parent_b):
    """Single-point crossover of two equal-length gene strings.

    Picks one random cut point and swaps the tails, returning the pair of
    children ``(head_a + tail_b, head_b + tail_a)``.

    Fixes versus the original: the two parameters shared one name (a
    SyntaxError), and both slices were taken from the same parent, so the
    "children" were clones of it.
    """
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def lowerCamelCase(child, genes):
    """With probability ``MUTATION_PROBABILITY``, replace one random gene of
    ``child`` with a random gene from ``genes``; return the (possibly
    unchanged) string.

    Fixes versus the original: the two parameters shared one name (a
    SyntaxError), and the randomly chosen replacement gene was assigned to a
    throwaway temporary instead of being written into the child, so mutation
    never actually happened.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # randint's upper bound is inclusive, so the index ranges over
        # -1 .. len(child) - 1; -1 addresses the last gene.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def lowerCamelCase(parent_a, population_score, genes):
    """Breed children from ``parent_a`` and random high-scoring partners.

    The number of children is proportional to ``parent_a``'s (normalized)
    fitness score, capped at 10 pairs; every child is passed through
    ``mutate`` before being returned.

    Fix versus the original: the three parameters shared one name, which is
    a Python SyntaxError.
    """
    __UpperCamelCase :int = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # NOTE(review): randint's upper bound is inclusive, so index
        # N_SELECTED can be drawn — relies on population_score having more
        # than N_SELECTED entries.
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def lowerCamelCase(target, genes, debug=True):
    """Evolve random strings toward ``target`` using the genes in ``genes``.

    Returns ``(generation, total_population, best_string)`` once a perfect
    match is found.  Raises ``ValueError`` if the configured population is
    smaller than the selection size, or if ``target`` contains characters
    not present in ``genes``.

    Fix versus the original: the parameters shared one name (a SyntaxError);
    the original argument order (target, genes, debug) is preserved.
    """
    if N_POPULATION < N_SELECTED:
        __UpperCamelCase :List[Any] = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        __UpperCamelCase :Optional[int] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg)
    # Generate random starting population.
    __UpperCamelCase :int = []
    for _ in range(N_POPULATION):
        population.append(''''''.join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution (best score first).
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    # Target sentence the algorithm must evolve.
    __lowercase = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    # Alphabet of genes the algorithm may draw from.
    __lowercase = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    # NOTE(review): the bindings above are mangled to `__lowercase`, yet the
    # call below reads `target_str` / `genes_list` and the print reads
    # `generation` / `population` / `target` — verify against the original.
    __lowercase , __lowercase , __lowercase = basic(target_str, genes_list)
    print(
        F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 1
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
    """Helper that builds tiny Bit (Big Transfer / ResNet-v2) configs and
    random inputs for the model tests below, and runs the per-model checks.

    NOTE(review): local and parameter names appear machine-mangled — values
    are assigned to ``__UpperCamelCase``/``__lowercase`` placeholders but
    read back under their original names; verify against the upstream
    ``BitModelTester``.
    """
    def __init__( self , __lowercase , __lowercase=3 , __lowercase=32 , __lowercase=3 , __lowercase=10 , __lowercase=[8, 16, 32, 64] , __lowercase=[1, 1, 2, 1] , __lowercase=True , __lowercase=True , __lowercase="relu" , __lowercase=3 , __lowercase=None , __lowercase=["stage2", "stage3", "stage4"] , __lowercase=[2, 3, 4] , __lowercase=1 , ) -> Dict:
        # Store the (deliberately tiny) hyper-parameters used by every test.
        __UpperCamelCase :List[Any] = parent
        __UpperCamelCase :Optional[Any] = batch_size
        __UpperCamelCase :int = image_size
        __UpperCamelCase :Tuple = num_channels
        __UpperCamelCase :List[Any] = embeddings_size
        __UpperCamelCase :Dict = hidden_sizes
        __UpperCamelCase :List[Any] = depths
        __UpperCamelCase :str = is_training
        __UpperCamelCase :Optional[Any] = use_labels
        __UpperCamelCase :int = hidden_act
        __UpperCamelCase :str = num_labels
        __UpperCamelCase :Tuple = scope
        # Number of stages is derived from the depths list.
        __UpperCamelCase :Dict = len(__lowercase)
        __UpperCamelCase :Any = out_features
        __UpperCamelCase :Any = out_indices
        __UpperCamelCase :Optional[int] = num_groups

    def UpperCamelCase__ ( self) -> Dict:
        # Build random pixel values (and labels when use_labels is set)
        # together with a fresh config.
        __UpperCamelCase :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __UpperCamelCase :List[Any] = None
        if self.use_labels:
            __UpperCamelCase :int = ids_tensor([self.batch_size] , self.num_labels)
        __UpperCamelCase :Any = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase__ ( self) -> Tuple:
        # Assemble a BitConfig from the stored hyper-parameters.
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
        # Forward pass through the bare BitModel; the final feature map is
        # downsampled 32x relative to the input image.
        __UpperCamelCase :Tuple = BitModel(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :int = model(__lowercase)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> int:
        # Classification head: logits must be (batch_size, num_labels).
        __UpperCamelCase :Dict = self.num_labels
        __UpperCamelCase :int = BitForImageClassification(__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Tuple = model(__lowercase , labels=__lowercase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Any:
        # Backbone checks: feature maps and channels must match out_features,
        # then again with out_features=None (defaults to the last stage).
        __UpperCamelCase :Dict = BitBackbone(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :List[str] = model(__lowercase)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        __UpperCamelCase :Dict = None
        __UpperCamelCase :str = BitBackbone(config=__lowercase)
        model.to(__lowercase)
        model.eval()
        __UpperCamelCase :Any = model(__lowercase)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])

    def UpperCamelCase__ ( self) -> Dict:
        # Split prepared inputs into (config, inputs_dict) for the common
        # ModelTesterMixin machinery.
        __UpperCamelCase :Union[str, Any] = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = config_and_inputs
        __UpperCamelCase :Optional[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Model-level test suite for the Bit family (BitModel, BitForImageClassification, BitBackbone).

    NOTE(review): every method below is named ``UpperCamelCase__``, so each later
    ``def`` shadows the previous one at class-creation time and only the last
    definition survives; presumably the methods originally had distinct
    unittest-style ``test_*`` names — confirm against the upstream test file.
    """

    # Model classes exercised by the common tests (empty tuple when torch is unavailable).
    a__ : Optional[int] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    a__ : Union[str, Any] = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the shared ModelTesterMixin machinery.
    a__ : List[str] = False
    a__ : Optional[Any] = False
    a__ : Any = False
    a__ : Union[str, Any] = False
    a__ : List[Any] = False

    def UpperCamelCase__ ( self) -> Dict:
        # Build the model tester and the config tester used by the tests below.
        __UpperCamelCase :Dict = BitModelTester(self)
        # NOTE(review): ``__lowercase`` is unbound here; the arguments were
        # presumably ``config_class=BitConfig, has_text_modality=False`` — confirm.
        __UpperCamelCase :List[str] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase)

    def UpperCamelCase__ ( self) -> List[Any]:
        # Run the standard battery of config (de)serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def UpperCamelCase__ ( self) -> Dict:
        # Intentionally a no-op placeholder.
        return

    @unittest.skip(reason='''Bit does not output attentions''')
    def UpperCamelCase__ ( self) -> int:
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def UpperCamelCase__ ( self) -> Any:
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        pass

    def UpperCamelCase__ ( self) -> Any:
        # Check that every model's forward signature starts with ``pixel_values``.
        __UpperCamelCase , __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase :Optional[int] = model_class(__lowercase)
            __UpperCamelCase :int = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase :List[str] = [*signature.parameters.keys()]
            __UpperCamelCase :List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase)

    def UpperCamelCase__ ( self) -> List[Any]:
        # Forward-pass shape check for the base model.
        __UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase)

    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Feature-map / channel checks for the backbone variant.
        __UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__lowercase)

    def UpperCamelCase__ ( self) -> Dict:
        # Verify that all normalization layers initialize to weight=1 / bias=0.
        # NOTE(review): ``nn.BatchNormad`` looks like a mangling of ``nn.BatchNorm2d`` — confirm.
        __UpperCamelCase , __UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase :Optional[Any] = model_class(config=__lowercase)
            for name, module in model.named_modules():
                if isinstance(__lowercase , (nn.BatchNormad, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Hidden-state shape checks for both Bit layer types, with the flag
        # passed via inputs and via config.
        # NOTE(review): the inner helper repeats ``__lowercase`` as a parameter
        # name, which is a SyntaxError in Python — this block cannot have run as-is.
        def check_hidden_states_output(__lowercase , __lowercase , __lowercase):
            __UpperCamelCase :Dict = model_class(__lowercase)
            model.to(__lowercase)
            model.eval()
            with torch.no_grad():
                __UpperCamelCase :Optional[int] = model(**self._prepare_for_class(__lowercase , __lowercase))
            __UpperCamelCase :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __UpperCamelCase :Any = self.model_tester.num_stages
            self.assertEqual(len(__lowercase) , expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        __UpperCamelCase , __UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase :Tuple = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                __UpperCamelCase :str = layer_type
                __UpperCamelCase :Optional[Any] = True
                check_hidden_states_output(__lowercase , __lowercase , __lowercase)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                __UpperCamelCase :Dict = True
                check_hidden_states_output(__lowercase , __lowercase , __lowercase)

    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def UpperCamelCase__ ( self) -> str:
        pass

    def UpperCamelCase__ ( self) -> int:
        # Logit-shape check for the classification head.
        __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowercase)

    @slow
    def UpperCamelCase__ ( self) -> List[str]:
        # Smoke-test loading the first published checkpoint from the hub.
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase :Dict = BitModel.from_pretrained(__lowercase)
            self.assertIsNotNone(__lowercase)
def lowerCamelCase ( ):
    """Load and return the standard COCO test-fixture image (the two cats).

    Fixes a NameError in the original: the opened image was bound to a
    throwaway name while the undefined name ``image`` was returned.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration test: run a pretrained Bit classifier on a fixture image and
    compare the logits against hard-coded reference values.

    NOTE(review): throughout this class results are bound to throwaway names
    while later lines read ``model``/``image_processor``/``inputs``/``outputs``,
    which are unbound here — presumably mangled from coherent locals; confirm
    against the upstream test file.
    """

    @cached_property
    def UpperCamelCase__ ( self) -> int:
        # Image processor for the first published Bit checkpoint (None without vision deps).
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__lowercase)
        __UpperCamelCase :List[Any] = self.default_image_processor
        __UpperCamelCase :List[str] = prepare_img()
        __UpperCamelCase :Any = image_processor(images=__lowercase , return_tensors='''pt''').to(__lowercase)
        # forward pass
        with torch.no_grad():
            __UpperCamelCase :Any = model(**__lowercase)
        # verify the logits: ImageNet-1k head => (1, 1000)
        __UpperCamelCase :Tuple = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape , __lowercase)
        # Reference logits for the first three classes, compared with loose tolerance.
        __UpperCamelCase :Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]]).to(__lowercase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4))
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Backbone-specific test suite for BitBackbone, driven by the shared
    backbone tester mixin via the class attributes below."""

    # Backbone classes under test (empty tuple when torch is unavailable).
    a__ : List[str] = (BitBackbone,) if is_torch_available() else ()
    # Config class the backbone tester should instantiate.
    a__ : Dict = BitConfig
    # Feature flag consumed by the shared tester mixin.
    a__ : Any = False

    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Model tester providing configs/inputs for the mixin's checks.
        __UpperCamelCase :Optional[Any] = BitModelTester(self)
| 43
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase = 16
__lowercase = 32
def lowerCamelCase ( accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ):
    """Build the GLUE/MRPC train and eval dataloaders.

    Fixes in this revision (the original neither compiled nor ran):
      * all three parameters were named ``SCREAMING_SNAKE_CASE`` — duplicate
        parameter names are a SyntaxError; they are now named by role, keeping
        the same positional order and defaults so existing call sites work;
      * intermediate results were bound to one clobbered throwaway name while
        later lines read ``tokenizer``/``datasets``/``tokenized_datasets``/
        ``accelerator``, all unbound — coherent locals restore the data flow;
      * undefined flag values replaced by their documented intent
        (``truncation=True``, ``max_length=None``, ``batched=True``,
        ``load_from_cache_file=False``, train ``shuffle=True`` / eval ``shuffle=False``).

    Args:
        accelerator: the ``accelerate.Accelerator`` (queried for TPU padding).
        batch_size: per-dataloader batch size.
        model_name_or_path: tokenizer checkpoint to load.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset('''glue''' , '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''')
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
    return train_dataloader, eval_dataloader
def lowerCamelCase ( config , args ):
    """Train and evaluate BERT on GLUE/MRPC under ``accelerate`` (optionally DeepSpeed).

    Fixes in this revision (the original neither compiled nor ran):
      * both parameters were named ``SCREAMING_SNAKE_CASE`` — duplicate
        parameter names are a SyntaxError; now ``config`` (hyper-parameter
        dict) and ``args`` (argparse namespace), same positional order;
      * every intermediate result was bound to one clobbered throwaway name
        while later lines read ``model``/``optimizer``/``lr_scheduler``/
        ``eval_metric`` etc. — coherent locals restore the data flow;
      * the per-epoch accuracy is now actually recorded in the results dict
        that is dumped to ``all_results.json`` (the original stored it into a
        throwaway name).

    Side effects: writes ``all_results.json`` to ``args.output_dir`` on the
    main process; asserts ``args.performance_lower_bound`` if given.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True)
    # Instantiate optimizer: a real AdamW unless DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler: real warmup schedule unless DeepSpeed supplies its own.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''' , '''mrpc''')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels''']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric)
        performance_metric[f"""epoch-{epoch}"""] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''') , '''w''') as f:
            json.dump(performance_metric , f)
def lowerCamelCase ( ):
    """Parse CLI arguments and launch training.

    Fixes the original's use of the undefined name ``SCREAMING_SNAKE_CASE``
    for every ``type=``/``default=``/``required=`` value and for the call to
    ``training_function``: each argparse option now gets a concrete type and
    default matching its documented meaning, and the parsed args plus a
    hyper-parameter dict are forwarded to the training entry point.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''' ,
        type=str ,
        default='''bert-base-cased''' ,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,
        required=False ,
    )
    parser.add_argument(
        '''--output_dir''' ,
        type=str ,
        default='''.''' ,
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,
    )
    parser.add_argument(
        '''--performance_lower_bound''' ,
        type=float ,
        default=None ,
        help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' ,
    )
    parser.add_argument(
        '''--num_epochs''' ,
        type=int ,
        default=3 ,
        help='''Number of train epochs.''' ,
    )
    args = parser.parse_args()
    # Fixed hyper-parameters; num_epochs is the only CLI-tunable one here.
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): ``training_function`` is not defined under that name in this
    # file (the training entry point above is named ``lowerCamelCase``); kept
    # as in the original — confirm the intended target.
    training_function(config , args)


if __name__ == "__main__":
    main()
| 43
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , UpperCAmelCase_ , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """DeeRoBERTa base model: a DeeBertModel whose word/position embeddings are
    swapped for RoBERTa embeddings."""

    # Config class and base-model prefix used by from_pretrained machinery.
    a__ : Optional[int] = RobertaConfig
    a__ : List[str] = """roberta"""

    def __init__( self , __lowercase) -> str:
        super().__init__(__lowercase)
        # NOTE(review): the RoBERTa embeddings are bound to a local and then
        # discarded — presumably this was ``self.embeddings = RobertaEmbeddings(config)``
        # in the original; confirm, otherwise the swap has no effect.
        __UpperCamelCase :Dict = RobertaEmbeddings(__lowercase)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """ , UpperCAmelCase_ , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """DeeRoBERTa sequence classifier with highway (early-exit) heads.

    NOTE(review): all named results in this class are bound to throwaway names
    while later lines read ``outputs``/``logits``/``loss_fct`` etc., and the
    forward signature repeats the parameter name ``__lowercase`` (a SyntaxError)
    — this block cannot have run as-is; the structure below mirrors the
    upstream DeeBERT highway-exit implementation.
    """

    # Config class and base-model prefix used by from_pretrained machinery.
    a__ : List[Any] = RobertaConfig
    a__ : str = """roberta"""

    def __init__( self , __lowercase) -> Any:
        super().__init__(__lowercase)
        __UpperCamelCase :List[str] = config.num_labels
        __UpperCamelCase :List[Any] = config.num_hidden_layers
        # NOTE(review): ``DeeRobertaModel`` is not defined under that name in
        # this file (the base class above was renamed) — confirm the target.
        __UpperCamelCase :Optional[Any] = DeeRobertaModel(__lowercase)
        __UpperCamelCase :Any = nn.Dropout(config.hidden_dropout_prob)
        __UpperCamelCase :str = nn.Linear(config.hidden_size , self.config.num_labels)

    @add_start_docstrings_to_model_forward(__lowercase)
    def UpperCamelCase__ ( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=-1 , __lowercase=False , ) -> List[Any]:
        # Forward pass with early-exit support: a HighwayException carries the
        # exit layer's outputs when an intermediate head exits early.
        __UpperCamelCase :List[str] = self.num_layers
        try:
            __UpperCamelCase :Tuple = self.roberta(
                __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
            # Pooled output -> dropout -> classification head.
            __UpperCamelCase :int = outputs[1]
            __UpperCamelCase :Optional[int] = self.dropout(__lowercase)
            __UpperCamelCase :Any = self.classifier(__lowercase)
            __UpperCamelCase :Tuple = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate highway head decided to exit early.
            __UpperCamelCase :Optional[Any] = e.message
            __UpperCamelCase :List[str] = e.exit_layer
            __UpperCamelCase :Any = outputs[0]
        if not self.training:
            # Track entropy of the final logits plus per-highway diagnostics.
            __UpperCamelCase :Optional[Any] = entropy(__lowercase)
            __UpperCamelCase :Dict = []
            __UpperCamelCase :int = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                __UpperCamelCase :Dict = MSELoss()
                __UpperCamelCase :str = loss_fct(logits.view(-1) , labels.view(-1))
            else:
                __UpperCamelCase :Optional[Any] = CrossEntropyLoss()
                __UpperCamelCase :Any = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            # work with highway exits
            __UpperCamelCase :List[Any] = []
            for highway_exit in outputs[-1]:
                __UpperCamelCase :Any = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__lowercase)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    __UpperCamelCase :Any = MSELoss()
                    __UpperCamelCase :Optional[Any] = loss_fct(highway_logits.view(-1) , labels.view(-1))
                else:
                    __UpperCamelCase :Union[str, Any] = CrossEntropyLoss()
                    __UpperCamelCase :Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
                highway_losses.append(__lowercase)
            if train_highway:
                __UpperCamelCase :Any = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                __UpperCamelCase :Union[str, Any] = (loss,) + outputs
        if not self.training:
            __UpperCamelCase :Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                __UpperCamelCase :List[str] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 43
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration class for Deformable DETR.

    Stores encoder/decoder sizes, the backbone choice, the deformable-attention
    hyper-parameters, the Hungarian-matcher costs and the loss coefficients.

    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``__lowercase`` for every argument — duplicate parameter names are a
    SyntaxError, so this block cannot have compiled as-is; the bare names read
    in the body (``backbone_config``, ``use_timm_backbone``, ...) are
    presumably the original parameter names. Kept byte-identical here.
    """

    # model_type and canonical attribute aliases used by the config machinery.
    a__ : List[str] = """deformable_detr"""
    a__ : Union[str, Any] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self , __lowercase=True , __lowercase=None , __lowercase=3 , __lowercase=300 , __lowercase=1_024 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=256 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=True , __lowercase=False , __lowercase="sine" , __lowercase="resnet50" , __lowercase=True , __lowercase=False , __lowercase=4 , __lowercase=4 , __lowercase=4 , __lowercase=False , __lowercase=300 , __lowercase=False , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=1 , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=0.1 , __lowercase=0.25 , __lowercase=False , **__lowercase , ) -> int:
        # A timm backbone and an explicit backbone_config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            # Resolve the HF backbone config, defaulting to ResNet stage4 features.
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                __UpperCamelCase :str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(__lowercase , __lowercase):
                __UpperCamelCase :str = backbone_config.get('''model_type''')
                __UpperCamelCase :Tuple = CONFIG_MAPPING[backbone_model_type]
                __UpperCamelCase :Any = config_class.from_dict(__lowercase)
        # NOTE(review): each assignment below binds a throwaway name instead of
        # a ``self.`` attribute — presumably mangled from attribute assignments.
        __UpperCamelCase :int = use_timm_backbone
        __UpperCamelCase :Dict = backbone_config
        __UpperCamelCase :Any = num_channels
        __UpperCamelCase :Optional[int] = num_queries
        __UpperCamelCase :Any = max_position_embeddings
        __UpperCamelCase :str = d_model
        __UpperCamelCase :Tuple = encoder_ffn_dim
        __UpperCamelCase :Union[str, Any] = encoder_layers
        __UpperCamelCase :List[Any] = encoder_attention_heads
        __UpperCamelCase :Any = decoder_ffn_dim
        __UpperCamelCase :List[str] = decoder_layers
        __UpperCamelCase :int = decoder_attention_heads
        __UpperCamelCase :str = dropout
        __UpperCamelCase :Any = attention_dropout
        __UpperCamelCase :int = activation_dropout
        __UpperCamelCase :List[Any] = activation_function
        __UpperCamelCase :List[Any] = init_std
        __UpperCamelCase :List[Any] = init_xavier_std
        __UpperCamelCase :int = encoder_layerdrop
        __UpperCamelCase :str = auxiliary_loss
        __UpperCamelCase :Optional[Any] = position_embedding_type
        __UpperCamelCase :Union[str, Any] = backbone
        __UpperCamelCase :Any = use_pretrained_backbone
        __UpperCamelCase :str = dilation
        # deformable attributes
        __UpperCamelCase :Optional[Any] = num_feature_levels
        __UpperCamelCase :str = encoder_n_points
        __UpperCamelCase :int = decoder_n_points
        __UpperCamelCase :Union[str, Any] = two_stage
        __UpperCamelCase :Optional[Any] = two_stage_num_proposals
        __UpperCamelCase :Dict = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        __UpperCamelCase :Optional[int] = class_cost
        __UpperCamelCase :List[Any] = bbox_cost
        __UpperCamelCase :str = giou_cost
        # Loss coefficients
        __UpperCamelCase :Tuple = mask_loss_coefficient
        __UpperCamelCase :Tuple = dice_loss_coefficient
        __UpperCamelCase :int = bbox_loss_coefficient
        __UpperCamelCase :Any = giou_loss_coefficient
        __UpperCamelCase :Dict = eos_coefficient
        __UpperCamelCase :Optional[Any] = focal_alpha
        __UpperCamelCase :Optional[Any] = disable_custom_kernels
        super().__init__(is_encoder_decoder=__lowercase , **__lowercase)

    @property
    def UpperCamelCase__ ( self) -> int:
        # Alias: num_attention_heads.
        return self.encoder_attention_heads

    @property
    def UpperCamelCase__ ( self) -> int:
        # Alias: hidden_size.
        return self.d_model

    def UpperCamelCase__ ( self) -> List[Any]:
        # Serialize the config (nested backbone config included) to a plain dict.
        __UpperCamelCase :Dict = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            __UpperCamelCase :Tuple = self.backbone_config.to_dict()
        __UpperCamelCase :List[Any] = self.__class__.model_type
        return output
| 43
| 1
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__lowercase = logging.getLogger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """RAG retriever for multi-GPU training with ``torch.distributed``.

    The main worker loads/queries the index; results are scattered to the other
    workers over a dedicated gloo process group (nccl lacks gather/scatter).

    NOTE(review): ``__init__`` repeats the parameter name ``__lowercase``
    (a SyntaxError) and several results below are bound to throwaway names
    while later lines read ``retrieved_doc_embeds``/``doc_ids`` etc. — this
    block cannot have run as-is; structure mirrors the upstream
    RagPyTorchDistributedRetriever.
    """

    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase=None) -> Any:
        super().__init__(
            __lowercase , question_encoder_tokenizer=__lowercase , generator_tokenizer=__lowercase , index=__lowercase , init_retrieval=__lowercase , )
        # Retrieval process group; created lazily in the init-retrieval step.
        __UpperCamelCase :int = None

    def UpperCamelCase__ ( self , __lowercase) -> Any:
        logger.info('''initializing retrieval''')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            __UpperCamelCase :Optional[int] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            __UpperCamelCase :Any = str(distributed_port + 1)
            __UpperCamelCase :List[Any] = dist.new_group(ranks=__lowercase , backend='''gloo''')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def UpperCamelCase__ ( self) -> List[str]:
        # True only on rank 0 of the retrieval process group.
        return dist.get_rank(group=self.process_group) == 0

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=torch.floataa) -> List[str]:
        # Receive this rank's shard of the scattered tensors from rank 0.
        __UpperCamelCase :List[str] = torch.empty(__lowercase , dtype=__lowercase)
        dist.scatter(__lowercase , src=0 , scatter_list=__lowercase , group=self.process_group)
        return target_tensor

    def UpperCamelCase__ ( self) -> List[Any]:
        __UpperCamelCase :List[str] = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        __UpperCamelCase :List[Any] = next((addr for addr in addrs if addr.startswith('''e''')) , __lowercase)
        return ifname

    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            __UpperCamelCase , __UpperCamelCase :str = self._main_retrieve(__lowercase , __lowercase)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowercase)
        # distributed training
        __UpperCamelCase :str = dist.get_world_size(group=self.process_group)
        # gather logic
        __UpperCamelCase :int = None
        if self._is_main():
            __UpperCamelCase :Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(__lowercase)]
        dist.gather(torch.tensor(__lowercase) , dst=0 , gather_list=__lowercase , group=self.process_group)
        # scatter logic
        __UpperCamelCase :Dict = question_hidden_states.shape[0]
        __UpperCamelCase :Dict = []
        __UpperCamelCase :Dict = []
        if self._is_main():
            assert len(__lowercase) == world_size
            __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self._main_retrieve(torch.cat(__lowercase).numpy() , __lowercase)
            __UpperCamelCase , __UpperCamelCase :int = torch.tensor(__lowercase), torch.tensor(__lowercase)
            __UpperCamelCase :Union[str, Any] = self._chunk_tensor(__lowercase , __lowercase)
            __UpperCamelCase :Optional[Any] = self._chunk_tensor(__lowercase , __lowercase)
        __UpperCamelCase :Union[str, Any] = self._scattered(__lowercase , [n_queries, n_docs] , target_type=torch.intaa)
        __UpperCamelCase :str = self._scattered(__lowercase , [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowercase)
| 43
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[Any] = """facebook/bart-large-mnli"""
a__ : int = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
a__ : Optional[Any] = """text_classifier"""
a__ : Any = AutoTokenizer
a__ : str = AutoModelForSequenceClassification
a__ : str = ["""text""", ["""text"""]]
a__ : Optional[int] = ["""text"""]
def UpperCamelCase__ ( self) -> Union[str, Any]:
super().setup()
__UpperCamelCase :int = self.model.config
__UpperCamelCase :Optional[Any] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail'''):
__UpperCamelCase :List[Any] = int(__lowercase)
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''')
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Any = labels
return self.pre_processor(
[text] * len(__lowercase) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]:
__UpperCamelCase :List[Any] = outputs.logits
__UpperCamelCase :Any = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 43
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Config tester for TFCvt: checks that a config built from the test inputs
    exposes the CvT-specific ``embed_dim`` and ``num_heads`` attributes.

    Fixes a defect in the original: the constructed config was bound to a
    throwaway name while ``hasattr`` was called on the unrelated (module-level)
    name ``__lowercase``, so the assertions never inspected the config.
    """

    def UpperCamelCase__ ( self) -> Any:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , '''embed_dim'''))
        self.parent.assertTrue(hasattr(config , '''num_heads'''))
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=64 , __lowercase=3 , __lowercase=[16, 48, 96] , __lowercase=[1, 3, 6] , __lowercase=[1, 2, 10] , __lowercase=[7, 3, 3] , __lowercase=[4, 2, 2] , __lowercase=[2, 1, 1] , __lowercase=[2, 2, 2] , __lowercase=[False, False, True] , __lowercase=[0.0, 0.0, 0.0] , __lowercase=0.02 , __lowercase=1E-1_2 , __lowercase=True , __lowercase=True , __lowercase=2 , ) -> Any:
__UpperCamelCase :Optional[Any] = parent
__UpperCamelCase :Tuple = batch_size
__UpperCamelCase :List[Any] = image_size
__UpperCamelCase :str = patch_sizes
__UpperCamelCase :Optional[Any] = patch_stride
__UpperCamelCase :int = patch_padding
__UpperCamelCase :int = is_training
__UpperCamelCase :Optional[Any] = use_labels
__UpperCamelCase :str = num_labels
__UpperCamelCase :Tuple = num_channels
__UpperCamelCase :Optional[Any] = embed_dim
__UpperCamelCase :List[Any] = num_heads
__UpperCamelCase :Dict = stride_kv
__UpperCamelCase :Optional[Any] = depth
__UpperCamelCase :Any = cls_token
__UpperCamelCase :Dict = attention_drop_rate
__UpperCamelCase :int = initializer_range
__UpperCamelCase :str = layer_norm_eps
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCamelCase :int = None
if self.use_labels:
# create a random int32 tensor of given shape
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.num_labels)
__UpperCamelCase :Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self) -> Optional[int]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Dict:
__UpperCamelCase :Any = TFCvtModel(config=__lowercase)
__UpperCamelCase :Optional[int] = model(__lowercase , training=__lowercase)
__UpperCamelCase :List[Any] = (self.image_size, self.image_size)
__UpperCamelCase , __UpperCamelCase :Tuple = image_size[0], image_size[1]
for i in range(len(self.depth)):
__UpperCamelCase :Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
__UpperCamelCase :int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
    """Forward TFCvtForImageClassification with labels; check the logits shape.

    NOTE(review): mangled assignments leave ``model`` and ``result`` unbound,
    so this raises NameError as written.
    """
    __UpperCamelCase :List[str] = self.num_labels
    __UpperCamelCase :Any = TFCvtForImageClassification(__lowercase)
    __UpperCamelCase :int = model(__lowercase , labels=__lowercase , training=__lowercase)
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self) -> List[str]:
    """Return (config, inputs_dict) for the shared model-tester machinery.

    NOTE(review): ``config_and_inputs`` and ``pixel_values`` are read but never
    bound (mangled assignment targets), so this raises NameError as written.
    """
    __UpperCamelCase :List[str] = self.prepare_config_and_inputs()
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = config_and_inputs
    __UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
    return config, inputs_dict
@require_tf
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Test suite for the TensorFlow CvT model and its image-classification head.

    NOTE(review): identifiers in this file were mechanically mangled — locals
    became ``__UpperCamelCase`` and many call arguments became the unbound name
    ``__lowercase`` — so several methods below cannot run as written. The two
    ``UpperCAmelCase_`` bases are presumably the shared model/pipeline tester
    mixins; confirm against the (out-of-view) imports.
    """

    # Models exercised by the common tests (empty tuple when TF is absent).
    a__ : str = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    # Task -> model mapping used by the pipeline tests.
    a__ : str = (
        {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the shared tester mixin.
    a__ : List[str] = False
    a__ : str = False
    a__ : List[Any] = False
    a__ : List[Any] = False
    a__ : Optional[int] = False

    def UpperCamelCase__ ( self) -> Tuple:
        # setUp: build the model tester plus a config tester for CvtConfig.
        __UpperCamelCase :List[str] = TFCvtModelTester(self)
        __UpperCamelCase :List[str] = TFCvtConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37)

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Full battery of config serialization / round-trip checks.
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='''Cvt does not output attentions''')
    def UpperCamelCase__ ( self) -> Optional[Any]:
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''')
    def UpperCamelCase__ ( self) -> Any:
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''')
    def UpperCamelCase__ ( self) -> str:
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    def UpperCamelCase__ ( self) -> int:
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    @slow
    def UpperCamelCase__ ( self) -> List[str]:
        super().test_keras_fit()

    @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''')
    def UpperCamelCase__ ( self) -> Tuple:
        # Run keras fit under the mixed_float16 policy, then restore float32.
        __UpperCamelCase :int = tf.keras.mixed_precision.Policy('''mixed_float16''')
        tf.keras.mixed_precision.set_global_policy(__lowercase)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('''float32''')

    def UpperCamelCase__ ( self) -> int:
        # The model's call() signature must start with `pixel_values`.
        __UpperCamelCase , __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase :Dict = model_class(__lowercase)
            __UpperCamelCase :Dict = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase :Dict = [*signature.parameters.keys()]
            __UpperCamelCase :Optional[int] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase)

    def UpperCamelCase__ ( self) -> str:
        # Check hidden_states count (one per stage) and first-stage spatial shape.
        def check_hidden_states_output(__lowercase , __lowercase , __lowercase):
            __UpperCamelCase :Tuple = model_class(__lowercase)
            __UpperCamelCase :Optional[int] = model(**self._prepare_for_class(__lowercase , __lowercase))
            __UpperCamelCase :List[Any] = outputs.hidden_states
            __UpperCamelCase :Any = len(self.model_tester.depth)
            self.assertEqual(len(__lowercase) , __lowercase)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        __UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase :int = True
            check_hidden_states_output(__lowercase , __lowercase , __lowercase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCamelCase :Optional[Any] = True
            check_hidden_states_output(__lowercase , __lowercase , __lowercase)

    def UpperCamelCase__ ( self) -> Tuple:
        __UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase)

    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowercase)

    @slow
    def UpperCamelCase__ ( self) -> Any:
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase :Optional[int] = TFCvtModel.from_pretrained(__lowercase)
            self.assertIsNotNone(__lowercase)
def lowerCamelCase ( ):
    """Load the standard COCO cats fixture as a PIL image for integration tests."""
    # Fix: the original assigned the image to a mangled temporary and then
    # returned the never-bound name `image` (NameError). Return directly.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test: run a pretrained TF CvT classifier on a COCO image.

    NOTE(review): local names were mangled to ``__UpperCamelCase`` while later
    statements read the originals (``model``, ``image_processor``, ``outputs``),
    so the test cannot run as written.
    """

    @cached_property
    def UpperCamelCase__ ( self) -> Dict:
        # Image processor matching the first pretrained CvT checkpoint.
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :List[str] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        __UpperCamelCase :Optional[Any] = self.default_image_processor
        __UpperCamelCase :Tuple = prepare_img()
        __UpperCamelCase :Dict = image_processor(images=__lowercase , return_tensors='''tf''')
        # forward pass
        __UpperCamelCase :str = model(**__lowercase)
        # verify the logits
        __UpperCamelCase :List[str] = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , __lowercase)
        __UpperCamelCase :Dict = tf.constant([0.92_85, 0.90_15, -0.31_50])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowercase , atol=1E-4))
| 43
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast pipeline tests for StableUnCLIPImgaImgPipeline built from tiny components.

    NOTE(review): local names were mechanically mangled to ``__UpperCamelCase``;
    several statements read original names (``embedder_hidden_size``,
    ``feature_extractor``, ``sd_pipe``, ...) that are never bound, so a number
    of methods below cannot execute as written.
    """

    a__ : int = StableUnCLIPImgaImgPipeline
    a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a__ : Optional[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    a__ : int = frozenset([] )

    def UpperCamelCase__ ( self) -> Tuple:
        # Build the dict of tiny sub-models needed to instantiate the pipeline.
        __UpperCamelCase :Tuple = 32
        __UpperCamelCase :Optional[int] = embedder_hidden_size
        # image encoding components
        __UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
        # regular denoising components
        torch.manual_seed(0)
        __UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
        __UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        __UpperCamelCase :Dict = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
        torch.manual_seed(0)
        __UpperCamelCase :List[Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Tuple = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
        torch.manual_seed(0)
        __UpperCamelCase :List[str] = AutoencoderKL()
        __UpperCamelCase :Tuple = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components

    def UpperCamelCase__ ( self , __lowercase , __lowercase=0 , __lowercase=True) -> str:
        # Deterministic generator plus a random 32x32 input image (tensor or PIL).
        if str(__lowercase).startswith('''mps'''):
            __UpperCamelCase :Union[str, Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase)).to(__lowercase)
        if pil_image:
            # Map from [-1, 1] to [0, 1], clamp, then convert to a PIL image.
            __UpperCamelCase :List[Any] = input_image * 0.5 + 0.5
            __UpperCamelCase :Optional[Any] = input_image.clamp(0 , 1)
            __UpperCamelCase :int = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            __UpperCamelCase :Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # End-to-end CPU run; compare a 3x3 corner slice against reference values.
        __UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Tuple = self.get_dummy_components()
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline(**__lowercase)
        __UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
        inputs.update({'''image_embeds''': None})
        __UpperCamelCase :Any = sd_pipe(**__lowercase).images
        __UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase__ ( self) -> str:
        # Attention slicing must match the full forward pass on CPU/MPS.
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)

    def UpperCamelCase__ ( self) -> List[Any]:
        # Batched inference must match single-sample inference on CPU/MPS.
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=__lowercase)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIP img2img against stored references.

    NOTE(review): local names were mangled to ``__UpperCamelCase`` while later
    statements read the originals (``pipe``, ``output``, ``image``), so these
    tests cannot run as written.
    """

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # 2-1-l checkpoint: compare generated image with a stored fp16 reference.
        __UpperCamelCase :int = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCamelCase :Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Dict = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
        __UpperCamelCase :Dict = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase)

    def UpperCamelCase__ ( self) -> List[str]:
        # 2-1-h checkpoint: same comparison against its own reference output.
        __UpperCamelCase :Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCamelCase :Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Optional[int] = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
        __UpperCamelCase :List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase)

    def UpperCamelCase__ ( self) -> List[str]:
        # Memory ceiling: the fully offloaded/sliced run must stay under 7 GB.
        __UpperCamelCase :Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        __UpperCamelCase :Union[str, Any] = pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :Optional[Any] = pipe(
            __lowercase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        __UpperCamelCase :int = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43
| 1
|
# Conversion factors: joules per one unit of each energy unit.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 100_0000,
    "gigajoule": 10_0000_0000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 360_0000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 418_6800.00,
    "electronvolt": 1.6_0217_6634e-19,
    "britishthermalunit_it": 1055.0_5585,
    "footpound": 1.3_5_5_8_1_8,
}
# Keep the old (mangled) module-level name bound for backward compatibility.
__lowercase = ENERGY_CONVERSION


def lowerCamelCase ( from_type , to_type , value ):
    """Convert ``value`` from ``from_type`` units to ``to_type`` units.

    Fixes two defects in the mangled original: the signature repeated the same
    parameter name three times (a SyntaxError), and the body referenced the
    table as ``ENERGY_CONVERSION`` although it was assigned to ``__lowercase``.

    Raises:
        ValueError: if either unit name is not a key of ENERGY_CONVERSION.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    # Convert to joules first, then divide by the target unit's factor.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 43
|
import numpy as np


def lowerCamelCase ( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ):
    """Power iteration: dominant eigenvalue and eigenvector of a square matrix.

    Fixes the mangled original, whose signature repeated the same parameter
    name (a SyntaxError) and whose assignments never bound the names the
    body reads (``w``, ``lambda_``, ``error`` ...).

    Args:
        input_matrix: square (n, n) array; must be Hermitian when complex.
        vector: initial guess of shape (n,).
        error_tol: relative-change threshold for convergence.
        max_iterations: hard cap on iterations.

    Returns:
        (largest_eigenvalue, corresponding_unit_eigenvector)
    """
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # The Rayleigh quotient of a Hermitian matrix is real up to round-off.
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def lowerCamelCase ( ):
    """Cross-check power iteration against numpy.linalg.eigh on a 3x3 example.

    NOTE(review): unrunnable as written — every assignment targets the mangled
    name ``__UpperCamelCase`` while later statements read the original names
    (``real_input_matrix``, ``input_matrix``, ...), and ``power_iteration`` /
    ``test_power_iteration`` are not defined under those names in this module.
    """
    __UpperCamelCase :int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    __UpperCamelCase :Optional[Any] = np.array([41, 4, 20] )
    __UpperCamelCase :Any = real_input_matrix.astype(np.complexaaa )
    __UpperCamelCase :Dict = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    # Subtracting the transpose of the upper-triangular imaginary part makes
    # the complex matrix Hermitian.
    complex_input_matrix += -1 * imag_matrix.T
    __UpperCamelCase :Optional[int] = np.array([41, 4, 20] ).astype(np.complexaaa )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            __UpperCamelCase :Any = real_input_matrix
            __UpperCamelCase :int = real_vector
        elif problem_type == "complex":
            __UpperCamelCase :Tuple = complex_input_matrix
            __UpperCamelCase :Optional[Any] = complex_vector
        # Our implementation.
        __UpperCamelCase , __UpperCamelCase :Dict = power_iteration(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        __UpperCamelCase , __UpperCamelCase :List[Any] = np.linalg.eigh(SCREAMING_SNAKE_CASE )
        # Last eigenvalue is the maximum one.
        __UpperCamelCase :List[Any] = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        __UpperCamelCase :str = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(SCREAMING_SNAKE_CASE ) - np.abs(SCREAMING_SNAKE_CASE ) ) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 43
| 1
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=UpperCAmelCase_ ):
    """Import-guard placeholder that errors helpfully when `keras_nlp` is missing.

    NOTE(review): the metaclass should be ``DummyObject`` (imported above); the
    mangled name ``UpperCAmelCase_`` is unbound at class-creation time, so this
    module fails to import as written.
    """

    # Backends this dummy stands in for.
    a__ : List[str] = ["""keras_nlp"""]

    def __init__( self , *__lowercase , **__lowercase) -> Tuple:
        # Raises an informative ImportError-style message immediately.
        requires_backends(self , ['''keras_nlp'''])
| 43
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
# NOTE(review): all three module constants below were mangled to the same name
# ``__lowercase`` — each assignment overwrites the previous one — while the
# tokenizer class further down reads VOCAB_FILES_NAMES and
# PRETRAINED_VOCAB_FILES_MAP. Restore the original constant names.
# Expected file name of the serialized fast tokenizer.
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
# Per-checkpoint download locations of tokenizer.json for published BLOOM models.
__lowercase = {
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Fast (Rust-backed) BLOOM tokenizer wrapper.

    NOTE(review): the ``__init__`` signature repeats ``__lowercase`` for several
    parameters, which is a SyntaxError (duplicate argument name). The original
    parameters were vocab_file/merges_file/tokenizer_file, the special tokens
    (unk/bos/eos/pad) and the add_prefix_space / clean_up_tokenization_spaces
    flags — confirm against upstream before restoring.
    """

    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : List[str] = ["""input_ids""", """attention_mask"""]
    # No slow-tokenizer counterpart exists for BLOOM.
    a__ : int = None

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        # Re-create the backend pre-tokenizer if the serialized add_prefix_space
        # flag disagrees with the one requested here.
        __UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
            __UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
            __UpperCamelCase :str = add_prefix_space
            __UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
        __UpperCamelCase :Tuple = add_prefix_space

    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        # Guard: pretokenized input requires add_prefix_space=True.
        __UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*__lowercase , **__lowercase)

    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        # Same guard for the single-example encoding path.
        __UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*__lowercase , **__lowercase)

    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        # Persist the underlying tokenizers model; returns the written file paths.
        __UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
        return tuple(__lowercase)

    def UpperCamelCase__ ( self , __lowercase) -> List[int]:
        # Flatten a Conversation into token ids, keeping only the most recent
        # model_max_length tokens.
        __UpperCamelCase :str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
        if len(__lowercase) > self.model_max_length:
            __UpperCamelCase :Any = input_ids[-self.model_max_length :]
        return input_ids
| 43
| 1
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Torch Dataset over CNN/DailyMail story files found in a directory.

    NOTE(review): assignments target the mangled name ``__UpperCamelCase``
    while later statements read the originals (``self.documents``,
    ``story_filenames_list``, ``document_path``, ``process_story``), so this
    class cannot run as written; ``process_story`` is also not defined under
    that name in this module.
    """

    def __init__( self , __lowercase="" , __lowercase="train") -> Dict:
        # Collect the path of every story file; summary files are skipped.
        assert os.path.isdir(__lowercase)
        __UpperCamelCase :Any = []
        __UpperCamelCase :Optional[Any] = os.listdir(__lowercase)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            __UpperCamelCase :Dict = os.path.join(__lowercase , __lowercase)
            if not os.path.isfile(__lowercase):
                continue
            self.documents.append(__lowercase)

    def __len__( self) -> List[Any]:
        # Number of story documents discovered at construction time.
        return len(self.documents)

    def __getitem__( self , __lowercase) -> Union[str, Any]:
        # Read one story file and split it into story/summary line lists.
        __UpperCamelCase :List[Any] = self.documents[idx]
        __UpperCamelCase :List[Any] = document_path.split('''/''')[-1]
        with open(__lowercase , encoding='''utf-8''') as source:
            __UpperCamelCase :int = source.read()
        __UpperCamelCase , __UpperCamelCase :int = process_story(__lowercase)
        return document_name, story_lines, summary_lines
def lowerCamelCase ( raw_story ):
    """Split a CNN/DailyMail story file into (story_lines, summary_lines).

    Summary lines are the ones following ``@highlight`` markers; story lines
    are everything before the first marker. Lines lacking terminal punctuation
    get a period appended (except the ``@highlight`` markers themselves).

    Fixes the mangled original whose body read names (``raw_story``,
    ``nonempty_lines``, ``lines``...) that were never bound, and which called
    ``_add_missing_period`` — a helper whose module-level definition lost that
    name; the helper is inlined here so the function is self-contained.
    """

    def _ensure_period(line):
        # Append "." unless the line already ends in punctuation or is a marker.
        end_tokens = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
        if line.startswith('''@highlight''' ):
            return line
        if line[-1] in end_tokens:
            return line
        return line + "."

    nonempty_lines = list(filter(lambda line : len(line ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_ensure_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight''' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('''@highlight''' ) , lines ) )
    return story_lines, summary_lines
def lowerCamelCase ( line ):
    """Append a period to ``line`` unless it already ends in punctuation or is an ``@highlight`` marker.

    Fixes the mangled original: the parameter was renamed away from ``line``
    and the token list was assigned to a name other than ``END_TOKENS``, so
    the body raised NameError as written.
    """
    # Accepted sentence terminators. Note '...' is multi-character and can
    # never match a single trailing character — kept for parity with upstream.
    END_TOKENS = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
    if line.startswith('''@highlight''' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def lowerCamelCase ( sequence , block_size , pad_token_id ):
    """Truncate or right-pad ``sequence`` (a list of token ids) to exactly ``block_size``.

    Fixes the mangled original whose signature reused one parameter name three
    times (a SyntaxError); the real names are recovered from the body.

    NOTE: pads ``sequence`` in place via ``extend`` before returning it.
    """
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def lowerCamelCase ( sequence , pad_token_id ):
    """Return an attention mask: 1 for real tokens, 0 where ``sequence`` equals ``pad_token_id``.

    Fixes the mangled original: the signature duplicated a parameter name
    (SyntaxError) and the masked assignment ``mask[idx_pad_tokens] = 0`` was
    flattened into an assignment to a throwaway name, losing the zeroing step.
    """
    mask = torch.ones_like(sequence )
    # Boolean index of the padding positions.
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCamelCase ( story_lines , summary_lines , tokenizer ):
    """Tokenize story and summary lines and flatten each into a single id list.

    Fixes the mangled original whose signature reused one parameter name three
    times (a SyntaxError); the real names are recovered from the body.

    Returns:
        (story_token_ids, summary_token_ids) — two flat lists of token ids.
    """
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCamelCase ( batch , separator_token_id ):
    """Alternating 0/1 segment ids per sentence for each sequence in ``batch``.

    A new sentence starts at every ``separator_token_id``; tokens before the
    first separator share its segment (``-1 % 2 == 1`` never occurs for them
    because the first token of each example is the separator in practice —
    here we simply reproduce the modulo behavior).

    Fixes the mangled original: duplicate parameter names (SyntaxError) and an
    append of the wrong name into ``batch_embeddings``.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
| 43
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
# Checkpoint name -> config URL for the published CTRL model.
# NOTE(review): this overwrites the logger binding above (both constants were
# mangled to the same name); the original names were `logger` and
# `CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP`.
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Configuration for the Salesforce CTRL model: stores model hyper-parameters.

    Fixes the mangled original: the ``__init__`` signature repeated the
    parameter name ``__lowercase`` (a SyntaxError — duplicate argument), and
    every ``self.<attr> = <param>`` assignment had its target replaced by a
    throwaway name, so no attribute was ever stored. Parameter names and order
    are recovered from the body's right-hand sides and the attribute_map.
    """

    a__ : str = """ctrl"""
    a__ : Dict = ["""past_key_values"""]
    a__ : Tuple = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ) -> List[Any]:
        # Store every hyper-parameter on the instance, then let the base
        # PretrainedConfig consume the remaining generic kwargs.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 43
| 1
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Image processor that resizes to a multiple of ``size_divisor`` and rescales to [0, 1].

    NOTE(review): the ``__init__`` signature repeats ``__lowercase`` (duplicate
    argument — SyntaxError) and the assignments target the mangled name
    ``__UpperCamelCase`` instead of ``self.<attr>``; the original parameters
    were do_resize, size_divisor, resample and do_rescale.
    """

    # Model input produced by preprocess().
    a__ : Optional[Any] = ["""pixel_values"""]

    def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
        __UpperCamelCase :Optional[int] = do_resize
        __UpperCamelCase :Any = do_rescale
        __UpperCamelCase :str = size_divisor
        __UpperCamelCase :Dict = resample
        super().__init__(**__lowercase)

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        # Resize so both sides become exact multiples of size_divisor (floored).
        __UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
        # Rounds the height and width down to the closest multiple of size_divisor
        __UpperCamelCase :List[Any] = height // size_divisor * size_divisor
        __UpperCamelCase :List[str] = width // size_divisor * size_divisor
        __UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
        return image

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        # Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1]).
        return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)

    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
        # Full preprocessing pipeline: to-numpy -> resize -> rescale -> channel layout.
        __UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
        __UpperCamelCase :List[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        __UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
        if not valid_images(__lowercase):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        __UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
        if do_resize:
            __UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
        if do_rescale:
            __UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
        __UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
        __UpperCamelCase :int = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase)
| 43
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    '''Fast CPU tests for ``TextToVideoSDPipeline`` built from tiny random components.

    NOTE(review): an automated rename replaced the assignment targets in several
    tests with ``__UpperCamelCase``, so names read later (``components``, ``inputs``,
    ``frames``, ``image_slice``, ``expected_slice``, ``sd_pipe``) are never bound --
    these tests cannot run as written.
    '''
    a__ : str = TextToVideoSDPipeline
    a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
    a__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    a__ : int = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Build the dict of tiny, deterministically-seeded pipeline components.
        torch.manual_seed(0)
        __UpperCamelCase :str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        __UpperCamelCase :Optional[int] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        __UpperCamelCase :Optional[Any] = CLIPTextModel(__lowercase)
        __UpperCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        __UpperCamelCase :Union[str, Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[int]:
        # Deterministic generator per device; MPS needs a CPU-seeded generator.
        if str(__lowercase).startswith('''mps'''):
            __UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :Dict = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Smoke test: 2-step generation on CPU, checking shape and a pixel slice.
        __UpperCamelCase :int = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Optional[int] = self.get_dummy_components()
        __UpperCamelCase :Dict = TextToVideoSDPipeline(**__lowercase)
        __UpperCamelCase :Any = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowercase)
        __UpperCamelCase :int = '''np'''
        __UpperCamelCase :List[str] = sd_pipe(**__lowercase).frames
        __UpperCamelCase :Optional[Any] = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        __UpperCamelCase :str = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def UpperCamelCase__ ( self) -> Tuple:
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3E-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Optional[int]:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1E-2)
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Dict:
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
    def UpperCamelCase__ ( self) -> str:
        pass
    def UpperCamelCase__ ( self) -> List[str]:
        return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase_ ( unittest.TestCase ):
    '''Slow CUDA integration tests comparing full-model video output to stored
    reference arrays.

    NOTE(review): assignment targets were mangled by an automated rename, so the
    names read later (``expected_video``, ``pipe``, ``prompt``, ``generator``,
    ``video_frames``, ``video``) are never bound -- these tests cannot run as
    written.
    '''
    def UpperCamelCase__ ( self) -> Dict:
        # 25-step generation with a DPM-Solver scheduler vs. the reference video.
        __UpperCamelCase :Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
        __UpperCamelCase :List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        __UpperCamelCase :Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        __UpperCamelCase :str = pipe.to('''cuda''')
        __UpperCamelCase :Optional[Any] = '''Spiderman is surfing'''
        __UpperCamelCase :Union[str, Any] = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=25 , output_type='''pt''').frames
        __UpperCamelCase :Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
    def UpperCamelCase__ ( self) -> int:
        # 2-step generation with the default scheduler vs. the 2-step reference.
        __UpperCamelCase :str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
        __UpperCamelCase :Union[str, Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        __UpperCamelCase :str = pipe.to('''cuda''')
        __UpperCamelCase :Union[str, Any] = '''Spiderman is surfing'''
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''pt''').frames
        __UpperCamelCase :Optional[Any] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
| 43
| 1
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    '''Tokenizer test-suite for CodeGen (slow and fast variants) over a tiny
    hand-built BPE vocabulary.

    NOTE(review): an automated rename replaced assignment targets with
    ``__UpperCamelCase`` and many call arguments with ``__lowercase``, so names
    read later in each method (``vocab_tokens``, ``merges``, ``tokenizer``,
    ``tokens``, ``out_s``/``out_sa``/``out_p``/``out_pa``, etc.) are unbound --
    these tests cannot run as written.
    '''
    a__ : Tuple = CodeGenTokenizer
    a__ : Optional[int] = CodeGenTokenizerFast
    a__ : Dict = True
    a__ : Optional[Any] = {"""add_prefix_space""": True}
    a__ : Union[str, Any] = False
    def UpperCamelCase__ ( self) -> int:
        # Write a minimal vocab + merges file pair into the temp dir for both tokenizers.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase :List[str] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        __UpperCamelCase :List[str] = dict(zip(__lowercase , range(len(__lowercase))))
        __UpperCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __UpperCamelCase :Union[str, Any] = {'''unk_token''': '''<unk>'''}
        __UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        __UpperCamelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(__lowercase) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(__lowercase))
    def UpperCamelCase__ ( self , **__lowercase) -> str:
        # Slow tokenizer factory bound to the temp vocab files.
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowercase)
    def UpperCamelCase__ ( self , **__lowercase) -> int:
        # Fast tokenizer factory bound to the temp vocab files.
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> int:
        __UpperCamelCase :List[Any] = '''lower newer'''
        __UpperCamelCase :List[str] = '''lower newer'''
        return input_text, output_text
    def UpperCamelCase__ ( self) -> Tuple:
        # Round-trip tokenize + convert_tokens_to_ids on the toy vocabulary.
        __UpperCamelCase :List[str] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        __UpperCamelCase :Optional[Any] = '''lower newer'''
        __UpperCamelCase :Optional[int] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        __UpperCamelCase :Tuple = tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase)
        self.assertListEqual(__lowercase , __lowercase)
        __UpperCamelCase :int = tokens + [tokenizer.unk_token]
        __UpperCamelCase :Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase) , __lowercase)
    def UpperCamelCase__ ( self) -> Any:
        # Parity checks between the slow and fast tokenizers.
        if not self.test_rust_tokenizer:
            return
        __UpperCamelCase :str = self.get_tokenizer()
        __UpperCamelCase :int = self.get_rust_tokenizer(add_prefix_space=__lowercase)
        __UpperCamelCase :Tuple = '''lower newer'''
        # Testing tokenization
        __UpperCamelCase :Dict = tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase)
        __UpperCamelCase :Any = rust_tokenizer.tokenize(__lowercase)
        self.assertListEqual(__lowercase , __lowercase)
        # Testing conversion to ids without special tokens
        __UpperCamelCase :Optional[Any] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase)
        __UpperCamelCase :List[str] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase)
        self.assertListEqual(__lowercase , __lowercase)
        # Testing conversion to ids with special tokens
        __UpperCamelCase :Tuple = self.get_rust_tokenizer(add_prefix_space=__lowercase)
        __UpperCamelCase :Any = tokenizer.encode(__lowercase , add_prefix_space=__lowercase)
        __UpperCamelCase :Dict = rust_tokenizer.encode(__lowercase)
        self.assertListEqual(__lowercase , __lowercase)
        # Testing the unknown token
        __UpperCamelCase :Union[str, Any] = tokens + [rust_tokenizer.unk_token]
        __UpperCamelCase :List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowercase) , __lowercase)
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> Any:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def UpperCamelCase__ ( self , __lowercase=15) -> Optional[Any]:
        # padding='max_length' with no pad token must raise for every pretrained variant.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                __UpperCamelCase :int = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase)
                # Simple input
                __UpperCamelCase :Dict = '''This is a simple input'''
                __UpperCamelCase :Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
                __UpperCamelCase :Any = ('''This is a simple input''', '''This is a pair''')
                __UpperCamelCase :Dict = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Simple input
                self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Simple input
                self.assertRaises(
                    __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Pair input
                self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''')
                # Pair input
                self.assertRaises(
                    __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , )
    def UpperCamelCase__ ( self) -> List[Any]:
        # With an explicit pad token, check max_length and longest-sequence padding shapes.
        __UpperCamelCase :Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''')
        # Simple input
        __UpperCamelCase :int = '''This is a simple input'''
        __UpperCamelCase :Optional[Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
        __UpperCamelCase :Any = ('''This is a simple input''', '''This is a pair''')
        __UpperCamelCase :Tuple = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        __UpperCamelCase :Tuple = tokenizer.pad_token_id
        __UpperCamelCase :Tuple = tokenizer(__lowercase , padding='''max_length''' , max_length=30 , return_tensors='''np''')
        __UpperCamelCase :Optional[int] = tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors='''np''')
        __UpperCamelCase :int = tokenizer(*__lowercase , padding='''max_length''' , max_length=60 , return_tensors='''np''')
        __UpperCamelCase :Dict = tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors='''np''')
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30)
        self.assertTrue(pad_token_id in out_s['''input_ids'''])
        self.assertTrue(0 in out_s['''attention_mask'''])
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0])
        self.assertFalse(0 in out_sa['''attention_mask'''][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1])
        self.assertTrue(0 in out_sa['''attention_mask'''][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60)
        self.assertTrue(pad_token_id in out_p['''input_ids'''])
        self.assertTrue(0 in out_p['''attention_mask'''])
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0])
        self.assertFalse(0 in out_pa['''attention_mask'''][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1])
        self.assertTrue(0 in out_pa['''attention_mask'''][1])
    def UpperCamelCase__ ( self) -> Optional[int]:
        # A custom BOS token must be prepended to every encoding and survive decoding.
        __UpperCamelCase :List[str] = '''$$$'''
        __UpperCamelCase :Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowercase , add_bos_token=__lowercase)
        __UpperCamelCase :Any = '''This is a simple input'''
        __UpperCamelCase :Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
        __UpperCamelCase :str = tokenizer.bos_token_id
        __UpperCamelCase :int = tokenizer(__lowercase)
        __UpperCamelCase :Any = tokenizer(__lowercase)
        self.assertEqual(out_s.input_ids[0] , __lowercase)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
        __UpperCamelCase :Any = tokenizer.decode(out_s.input_ids)
        __UpperCamelCase :List[Any] = tokenizer.batch_decode(out_sa.input_ids)
        self.assertEqual(decode_s.split()[0] , __lowercase)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    @slow
    def UpperCamelCase__ ( self) -> str:
        # Decoding with truncate_before_pattern should drop trailing comment/blank-line noise.
        __UpperCamelCase :Tuple = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''')
        __UpperCamelCase :List[Any] = '''\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'''
        __UpperCamelCase :Dict = '''\nif len_a > len_b: result = a\nelse: result = b'''
        __UpperCamelCase :List[Any] = tokenizer.encode(__lowercase)
        __UpperCamelCase :str = ['''^#''', re.escape('''<|endoftext|>'''), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        __UpperCamelCase :Tuple = tokenizer.decode(__lowercase , truncate_before_pattern=__lowercase)
        self.assertEqual(__lowercase , __lowercase)
    def UpperCamelCase__ ( self) -> Any:
        pass
| 43
|
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Compute the Z-array of a string.

    ``z[i]`` is the length of the longest substring starting at ``i`` that is
    also a prefix of the string (``z[0]`` is left at 0 by convention).

    The original delegated the extension test to a ``go_next`` helper whose
    name was lost in a mechanical rename (and was called with broken
    arguments); the comparison is inlined here so the function is
    self-contained.

    Args:
        SCREAMING_SNAKE_CASE: the input string (parameter name kept for
            backward compatibility with existing keyword callers).

    Returns:
        list[int]: the Z-array, one entry per character.
    """
    s = SCREAMING_SNAKE_CASE
    z_result = [0] * len(s)
    # [left_pointer, right_pointer] tracks the rightmost window known to match a prefix.
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(s)):
        if i <= right_pointer:
            # Inside the window: reuse the mirrored value, capped at the window end.
            z_result[i] = min(right_pointer - i + 1 , z_result[i - left_pointer])
        # Extend the match by direct character comparison.
        while i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]:
            z_result[i] += 1
        # If this match extends past the window, it becomes the new window.
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def lowerCamelCase ( i , z_result , s ):
    """Return True while the Z-match starting at ``i`` can still be extended.

    True when the candidate's next character ``s[i + z_result[i]]`` is in
    bounds and equals the next prefix character ``s[z_result[i]]``.

    The original declared all three parameters as ``SCREAMING_SNAKE_CASE``
    (duplicate argument names, a SyntaxError); the names are restored from how
    the body uses them.

    Args:
        i: current position in the string.
        z_result: the partially-filled Z-array.
        s: the string being scanned.

    Returns:
        bool: whether the match at ``i`` extends by one more character.
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def lowerCamelCase ( pattern , input_str ):
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-function.

    Fixes three defects in the original:
    * both parameters were declared ``SCREAMING_SNAKE_CASE`` (duplicate
      argument names, a SyntaxError) -- names restored from the body;
    * it called ``z_function``, a module-level name lost in a rename, so the
      Z-array is now computed by a local helper;
    * pattern and text were concatenated with no separator, over-counting when
      a prefix match straddled the boundary (e.g. pattern "aa" in text "a"
      returned 1). A NUL separator, assumed absent from both strings, stops
      matches from crossing the boundary.

    Args:
        pattern: the substring to search for.
        input_str: the text to search in.

    Returns:
        int: number of (possibly overlapping) occurrences of ``pattern``;
        0 for an empty pattern.
    """
    def _z(s):
        # Standard Z-algorithm: z[i] = longest common prefix of s and s[i:].
        z = [0] * len(s)
        left, right = 0, 0
        for i in range(1, len(s)):
            if i <= right:
                z[i] = min(right - i + 1, z[i - left])
            while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
                z[i] += 1
            if i + z[i] - 1 > right:
                left, right = i, i + z[i] - 1
        return z

    if not pattern:
        # Degenerate query: treat "empty pattern" as zero occurrences.
        return 0
    z_result = _z(pattern + "\x00" + input_str)
    # Every position whose prefix match covers the whole pattern is one occurrence.
    return sum(1 for val in z_result if val >= len(pattern))
if __name__ == "__main__":
    # Self-check via doctest when executed as a script (no doctests are
    # currently defined above, so this is effectively a no-op smoke run).
    import doctest
    doctest.testmod()
| 43
| 1
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase ( tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch state dict on disk.

    The original declared all three parameters as ``SCREAMING_SNAKE_CASE``
    (duplicate argument names, a SyntaxError); names and order are restored
    from the ``__main__`` call site
    ``(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)``.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        albert_config_file: JSON config describing the pretrained architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.
    """
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): the parser/args binding targets were mangled by an
    # automated rename -- ``parser`` and ``args`` read below are never bound,
    # and ``convert_tf_checkpoint_to_pytorch`` no longer exists under that
    # name in this module, so this entry point cannot run as written.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--albert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained ALBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    __lowercase = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = 256
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Spectrogram diffusion pipeline: encodes note tokens and a rolling context
    of previously generated spectrogram frames, denoises a new chunk with the
    scheduler loop, and optionally vocodes the result with MelGAN.

    NOTE(review): assignment targets throughout this class were mangled by an
    automated rename (``__UpperCamelCase``), so names read later (``features``,
    ``zero_one``, ``pred_mel``, ``ones``, ``mel``, ``output`` ...) are unbound,
    and many call arguments were collapsed to ``__lowercase`` -- the class
    cannot run as written.
    '''
    a__ : Tuple = ["""melgan"""]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> None:
        super().__init__()
        # From MELGAN
        __UpperCamelCase :int = math.log(1E-5) # Matches MelGAN training.
        __UpperCamelCase :int = 4.0 # Largest value for most examples
        __UpperCamelCase :str = 128
        self.register_modules(
            notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )
    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Dict:
        # Map raw features from [min_value, max_value] into output_range.
        __UpperCamelCase , __UpperCamelCase :str = output_range
        if clip:
            __UpperCamelCase :Union[str, Any] = torch.clip(__lowercase , self.min_value , self.max_value)
        # Scale to [0, 1].
        __UpperCamelCase :Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Optional[int]:
        # Inverse of scale_features: map model outputs back to feature range.
        __UpperCamelCase , __UpperCamelCase :int = input_range
        __UpperCamelCase :Optional[int] = torch.clip(__lowercase , __lowercase , __lowercase) if clip else outputs
        # Scale to [0, 1].
        __UpperCamelCase :List[str] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
        # Run both encoders; non-padding note tokens (id > 0) form the mask.
        __UpperCamelCase :List[str] = input_tokens > 0
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.notes_encoder(
            encoder_input_tokens=__lowercase , encoder_inputs_mask=__lowercase)
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.continuous_encoder(
            encoder_inputs=__lowercase , encoder_inputs_mask=__lowercase)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
        # Normalize the timestep to a batch-shaped tensor, then run the decoder.
        __UpperCamelCase :Optional[int] = noise_time
        if not torch.is_tensor(__lowercase):
            __UpperCamelCase :str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(__lowercase) and len(timesteps.shape) == 0:
            __UpperCamelCase :Dict = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        __UpperCamelCase :List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        __UpperCamelCase :Tuple = self.decoder(
            encodings_and_masks=__lowercase , decoder_input_tokens=__lowercase , decoder_noise_time=__lowercase)
        return logits
    @torch.no_grad()
    def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__lowercase)}.""")
        __UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
        __UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
        __UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
        # Generate one spectrogram chunk per token segment, feeding each chunk
        # back in as the continuous context for the next.
        for i, encoder_input_tokens in enumerate(__lowercase):
            if i == 0:
                __UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                __UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                __UpperCamelCase :Tuple = ones
            __UpperCamelCase :Optional[Any] = self.scale_features(
                __lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
            __UpperCamelCase :int = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            __UpperCamelCase :int = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(__lowercase)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                __UpperCamelCase :Optional[int] = self.decode(
                    encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                __UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
            __UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
            __UpperCamelCase :List[Any] = mel[:1]
            __UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
            __UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowercase , __lowercase)
            logger.info('''Generated segment''' , __lowercase)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            __UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            __UpperCamelCase :List[str] = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=__lowercase)
| 43
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__lowercase = None
__lowercase = logging.get_logger(__name__)
__lowercase = '''▁'''
__lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
__lowercase = {
'''google/pegasus-xsum''': 512,
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Fast Pegasus tokenizer backed by a sentencepiece model.

    NOTE(review): assignment targets in ``__init__`` and the helper methods
    were mangled by an automated rename, so names read later
    (``additional_special_tokens_extended``, ``all_special_ids``, ``seq``,
    ``out_vocab_file``) are unbound and this class cannot run as written; the
    class attributes below also reference module-level constants whose binding
    targets were collapsed to ``__lowercase``.
    '''
    a__ : Any = VOCAB_FILES_NAMES
    a__ : int = PRETRAINED_VOCAB_FILES_MAP
    a__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : Dict = PegasusTokenizer
    a__ : Tuple = ["""input_ids""", """attention_mask"""]
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase="<pad>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<mask_2>" , __lowercase="<mask_1>" , __lowercase=None , __lowercase=103 , **__lowercase , ) -> Any:
        __UpperCamelCase :Dict = offset
        if additional_special_tokens is not None:
            if not isinstance(__lowercase , __lowercase):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(__lowercase)}, but is"""
                    f""" {type(__lowercase)}""")
            # Prepend mask_token_sent unless the caller already included it.
            __UpperCamelCase :Union[str, Any] = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(__lowercase) , self.offset - 1)
            ]
            if len(set(__lowercase)) != len(__lowercase):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""")
            __UpperCamelCase :Optional[int] = additional_special_tokens_extended
        else:
            __UpperCamelCase :str = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset)]
        super().__init__(
            __lowercase , tokenizer_file=__lowercase , pad_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , mask_token=__lowercase , mask_token_sent=__lowercase , offset=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        __UpperCamelCase :Dict = vocab_file
        # Slow-tokenizer saving is only possible when a sentencepiece file is present.
        __UpperCamelCase :Optional[Any] = False if not self.vocab_file else True
    def UpperCamelCase__ ( self , __lowercase) -> Dict:
        # Build a 0/1 mask marking which ids in a sequence are special tokens.
        __UpperCamelCase :Tuple = set(self.all_special_ids) # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                '''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
                f""" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}""")
        return [1 if x in all_special_ids else 0 for x in seq]
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(__lowercase)
        elif token_ids_a is None:
            return self._special_token_mask(__lowercase) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a) + [1]
    def UpperCamelCase__ ( self , __lowercase , __lowercase=None) -> List[int]:
        # Pegasus appends a single EOS; no BOS and no pair-specific separators.
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        # Copy the sentencepiece model into save_directory (if it isn't already there).
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')
        if not os.path.isdir(__lowercase):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        __UpperCamelCase :Union[str, Any] = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase):
            copyfile(self.vocab_file , __lowercase)
        return (out_vocab_file,)
| 43
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): the next two binding targets were collapsed to ``__lowercase``
# by an automated rename; later code reads them as ``logger`` and ``MAPPING``
# (the fairseq-state-dict -> HF-Hubert key translation table), so neither name
# resolves as written.
__lowercase = logging.get_logger(__name__)
__lowercase = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy fairseq tensor `value` into the HF module attribute addressed by dotted `key`.

    Args:
        hf_pointer: root HF model/module to index into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: source tensor from the fairseq state dict.
        full_name: original fairseq parameter name (for logging/errors only).
        weight_type: one of "weight", "weight_g", "weight_v", "bias", or None.
    """
    # Walk the dotted path down to the target submodule / parameter.
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    # The garbled original assigned `value` to throwaway locals; the intent
    # (restored here) is to write into the target parameter's .data.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Transfer every fairseq state-dict entry onto the HF Hubert model.

    Conv feature-extractor weights go through `load_conv_layer`; everything
    else is renamed via MAPPING and written with `set_recursively`.
    Unmatched entries are collected and logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # Fine-tuned checkpoints wrap the backbone under `hubert.`.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''')[-1] == name.split('''.''')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the transformer layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>.<weight|bias>":
    type_id 0 is the conv itself, type_id 2 the (group/layer) norm.
    Anything else is recorded in `unused_weights`.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Fixed: the assert message previously indexed `feature_extractor[layer_id]`,
            # which would itself raise; it must go through `.conv_layers`.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq Hubert checkpoint to a HF `HubertModel`/`HubertForCTC`.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF config.json to start from.
        dict_path: optional fairseq dictionary (fine-tuned CTC models).
        is_finetuned: whether the checkpoint is a fine-tuned (CTC) model.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False, )
            # Layer-norm feature extractors expect an attention mask at inference.
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse args, then run the conversion.
    # (Fixed: the parser/args objects were assigned to a throwaway name
    # while the code below referenced `parser` and `args`.)
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 43
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Restored names: later code references `logger` and `MAPPING`, but both
# assignments had been clobbered into the same throwaway `__lowercase` name.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF Hubert parameter name ("*" = layer index).
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy fairseq tensor `value` into the HF module attribute addressed by dotted `key`.

    Args:
        hf_pointer: root HF model/module to index into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: source tensor from the fairseq state dict.
        full_name: original fairseq parameter name (for logging/errors only).
        weight_type: one of "weight", "weight_g", "weight_v", "bias", or None.
    """
    # Walk the dotted path down to the target submodule / parameter.
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    # The garbled original assigned `value` to throwaway locals; the intent
    # (restored here) is to write into the target parameter's .data.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Transfer every fairseq state-dict entry onto the HF Hubert model.

    Conv feature-extractor weights go through `load_conv_layer`; everything
    else is renamed via MAPPING and written with `set_recursively`.
    Unmatched entries are collected and logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # Fine-tuned checkpoints wrap the backbone under `hubert.`.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''')[-1] == name.split('''.''')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the transformer layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>.<weight|bias>":
    type_id 0 is the conv itself, type_id 2 the (group/layer) norm.
    Anything else is recorded in `unused_weights`.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Fixed: the assert message previously indexed `feature_extractor[layer_id]`,
            # which would itself raise; it must go through `.conv_layers`.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq Hubert checkpoint to a HF `HubertModel`/`HubertForCTC`.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF config.json to start from.
        dict_path: optional fairseq dictionary (fine-tuned CTC models).
        is_finetuned: whether the checkpoint is a fine-tuned (CTC) model.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False, )
            # Layer-norm feature extractors expect an attention mask at inference.
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse args, then run the conversion.
    # (Fixed: the parser/args objects were assigned to a throwaway name
    # while the code below referenced `parser` and `args`.)
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 43
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# Restored constant names: every assignment below had been collapsed into the
# same `__lowercase` name (each clobbering the previous), while the functions
# in this script reference OUTPUT_DIR and NUMBER_IMAGES by name.
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop boxes smaller than 1% of the mosaic
LABEL_DIR = ''''''  # directory of YOLO .txt label files
IMG_DIR = ''''''  # directory of the matching .jpg images
OUTPUT_DIR = ''''''  # where mosaics and labels are written
NUMBER_IMAGES = 250  # how many mosaic images to generate
def main():
    """Generate NUMBER_IMAGES mosaic images (4 random sources each) plus YOLO labels.

    Restored to the name the `__main__` guard calls; locals restored from the
    garbled throwaway assignments.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        # Pick 4 distinct source images for one mosaic.
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""")
        # Convert corner boxes back to YOLO center/width/height format.
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(obj)
        with open(f"""{file_root}.txt""", '''w''') as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Read YOLO-format .txt labels and pair each with its .jpg image path.

    Args:
        label_dir: directory containing "<name>.txt" label files.
        img_dir: directory containing the matching "<name>.jpg" images.

    Returns:
        (img_paths, labels) where each label entry is a list of
        [class_id, xmin, ymin, xmax, ymax] boxes in relative coordinates.
        Files with no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
            # YOLO stores (cx, cy, w, h); convert to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Stitch 4 images into one mosaic and remap their boxes.

    Args:
        all_img_list: list of image paths.
        all_annos: per-image lists of [class, xmin, ymin, xmax, ymax] (relative).
        idxs: 4 indices selecting the source images (TL, TR, BL, BR order).
        output_size: (height, width) of the mosaic.
        scale_range: (lo, hi) range for the random split point.
        filter_scale: drop remapped boxes narrower/shorter than this.

    Returns:
        (output_img, new_annos, first_source_path)
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            # Restored: the garbled source dropped the slice target below.
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    """Return a random string of `number_char` lowercase letters and digits.

    Restored to the name called by main(); the garbled signature repeated the
    same parameter name (a SyntaxError).
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
# Script entry point: generate the mosaic dataset, then report completion.
if __name__ == "__main__":
    main()
    print('''DONE ✅''')
| 43
| 1
|
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase(inductance, capacitance):
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    Restored parameter names (the garbled signature repeated the same name, a
    SyntaxError); the body already references `inductance` and `capacitance`.

    Args:
        inductance: inductance L in henries (must be > 0).
        capacitance: capacitance C in farads (must be > 0).

    Returns:
        ("Resonant frequency", frequency_in_hertz)

    Raises:
        ValueError: if either value is zero or negative.
    """
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 43
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored names: both assignments below were garbled into the same
# `__lowercase` name, with the second clobbering the first.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
WAV2VEC2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_(PretrainedConfig):
    """Configuration for a Wav2Vec2-style speech model.

    Fixes: the base class was the undefined `UpperCAmelCase_` (the file imports
    `PretrainedConfig`), and `__init__` declared dozens of duplicate
    `__lowercase` parameters — a SyntaxError. Parameter names are restored from
    the attribute-assignment order in the body; defaults are unchanged.
    """

    model_type = """wav2vec2"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 43
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Convert an original mLUKE checkpoint into a Hugging Face ``LukeForMaskedLM`` model.

    NOTE(review): identifiers in this block were mechanically mangled
    (``SCREAMING_SNAKE_CASE`` parameters, ``__UpperCamelCase`` assignment targets).
    Names read later (``metadata``, ``state_dict``, ``entity_vocab``, ``tokenizer``,
    ``model`` ...) presumably refer to the mangled assignments above them — confirm
    against the upstream conversion script before running.
    """
    with open(SCREAMING_SNAKE_CASE ) as metadata_file:
        __UpperCamelCase :int = json.load(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Any = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    __UpperCamelCase :Optional[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    __UpperCamelCase :str = load_original_entity_vocab(SCREAMING_SNAKE_CASE )
    # add an entry for [MASK2]
    __UpperCamelCase :Tuple = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    __UpperCamelCase :Any = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    __UpperCamelCase :Optional[int] = AddedToken('''<ent>''' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = AddedToken('''<ent2>''' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
    # Rewrite the saved tokenizer config so it is loaded back as an MLukeTokenizer.
    with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f:
        __UpperCamelCase :Any = json.load(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Any = '''MLukeTokenizer'''
    with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    with open(os.path.join(SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Optional[int] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
    # Initialize the embeddings of the special tokens
    __UpperCamelCase :List[str] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    __UpperCamelCase :Optional[int] = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    __UpperCamelCase :List[Any] = state_dict['''embeddings.word_embeddings.weight''']
    __UpperCamelCase :Tuple = word_emb[ent_init_index].unsqueeze(0 )
    __UpperCamelCase :List[str] = word_emb[enta_init_index].unsqueeze(0 )
    __UpperCamelCase :Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        __UpperCamelCase :Optional[int] = state_dict[bias_name]
        __UpperCamelCase :int = decoder_bias[ent_init_index].unsqueeze(0 )
        __UpperCamelCase :Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
        __UpperCamelCase :Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            __UpperCamelCase :List[str] = f"""encoder.layer.{layer_index}.attention.self."""
            __UpperCamelCase :str = state_dict[prefix + matrix_name]
            __UpperCamelCase :Optional[Any] = state_dict[prefix + matrix_name]
            __UpperCamelCase :Union[str, Any] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    __UpperCamelCase :int = state_dict['''entity_embeddings.entity_embeddings.weight''']
    __UpperCamelCase :Union[str, Any] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    __UpperCamelCase :Tuple = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    __UpperCamelCase :List[str] = state_dict['''entity_predictions.bias''']
    __UpperCamelCase :int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    __UpperCamelCase :int = torch.cat([entity_prediction_bias, entity_mask_bias] )
    __UpperCamelCase :str = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE ).eval()
    # Drop tied decoder weights; they are restored by tie_weights() below.
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    __UpperCamelCase :Any = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            __UpperCamelCase :Union[str, Any] = state_dict[key]
        else:
            __UpperCamelCase :Optional[int] = state_dict[key]
    __UpperCamelCase , __UpperCamelCase :List[Any] = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
    # Only the position-ids buffer may be unexpected; only the tied weights may be missing.
    if set(SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
    if set(SCREAMING_SNAKE_CASE ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    __UpperCamelCase :List[str] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , task='''entity_classification''' )
    __UpperCamelCase :Dict = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    __UpperCamelCase :Optional[Any] = (0, 9)
    __UpperCamelCase :Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
    __UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        __UpperCamelCase :Optional[int] = torch.Size((1, 33, 768) )
        __UpperCamelCase :Union[str, Any] = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        __UpperCamelCase :Union[str, Any] = torch.Size((1, 1, 768) )
        __UpperCamelCase :Union[str, Any] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    __UpperCamelCase :Optional[Any] = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Optional[int] = '''Tokyo is the capital of <mask>.'''
    __UpperCamelCase :Any = (24, 30)
    __UpperCamelCase :Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
    __UpperCamelCase :Tuple = model(**SCREAMING_SNAKE_CASE )
    __UpperCamelCase :int = encoding['''input_ids'''][0].tolist()
    __UpperCamelCase :int = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    __UpperCamelCase :Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :int = outputs.entity_logits[0][0].argmax().item()
    __UpperCamelCase :Union[str, Any] = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(SCREAMING_SNAKE_CASE ) )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Load the original entity vocabulary (one JSON object per line) into a flat name->id mapping.

    NOTE(review): identifiers were mangled — the assignments below presumably
    build the ``new_mapping`` dict returned at the end (special tokens keyed by
    their bare name, other entities keyed ``language:entity_name``); confirm
    against the upstream script.
    """
    __UpperCamelCase :Optional[Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    __UpperCamelCase :List[Any] = [json.loads(SCREAMING_SNAKE_CASE ) for line in open(SCREAMING_SNAKE_CASE )]
    __UpperCamelCase :int = {}
    for entry in data:
        __UpperCamelCase :int = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are recorded once per entry under the bare name.
                __UpperCamelCase :Optional[int] = entity_id
                break
            __UpperCamelCase :Tuple = f"""{language}:{entity_name}"""
            __UpperCamelCase :Union[str, Any] = entity_id
    return new_mapping
if __name__ == "__main__":
    # Command-line entry point: parse checkpoint/metadata/vocab paths and run the conversion.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    # NOTE(review): `parser` / `convert_luke_checkpoint` presumably refer to the
    # mangled `__lowercase` / `lowerCamelCase` bindings above — confirm.
    __lowercase = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 43
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Image processor that resizes images down to a multiple of ``size_divisor`` and rescales pixel values.

    NOTE(review): attribute assignments in ``__init__`` were mangled into
    ``__UpperCamelCase`` locals; ``preprocess`` later reads ``self.do_resize`` /
    ``self.do_rescale`` / ``self.size_divisor`` / ``self.resample`` — confirm the
    assignments should target ``self``.
    """
    a__ : Optional[Any] = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
        # Defaults used by `preprocess` when the caller does not override them.
        __UpperCamelCase :Optional[int] = do_resize
        __UpperCamelCase :Any = do_rescale
        __UpperCamelCase :str = size_divisor
        __UpperCamelCase :Dict = resample
        super().__init__(**__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        """Resize so height/width become the largest multiples of ``size_divisor`` not exceeding them."""
        __UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
        # Rounds the height and width down to the closest multiple of size_divisor
        __UpperCamelCase :List[Any] = height // size_divisor * size_divisor
        __UpperCamelCase :List[str] = width // size_divisor * size_divisor
        __UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
        return image
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
        """Rescale pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
        """Preprocess one image or a batch: optional resize and rescale, then channel-dimension formatting."""
        __UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
        __UpperCamelCase :List[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')
        __UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
        if not valid_images(__lowercase):
            raise ValueError('''Invalid image(s)''')
        # All transformations expect numpy arrays.
        __UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
        if do_resize:
            __UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
        if do_rescale:
            # Normalize 8-bit pixel values into [0, 1].
            __UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
        __UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
        __UpperCamelCase :int = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase)
| 43
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase_ ) , """Tatoeba directory does not exist.""" )
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration tests for TatoebaConverter; skipped unless a local Tatoeba checkout exists."""
    @cached_property
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Build a converter writing into a throwaway temp directory.
        __UpperCamelCase :Any = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__lowercase)
    @slow
    def UpperCamelCase__ ( self) -> Any:
        # End-to-end conversion of the Hebrew->English model.
        # NOTE(review): `self.resolver` presumably refers to the cached_property above — names were mangled; confirm.
        self.resolver.convert_models(['''heb-eng'''])
    @slow
    def UpperCamelCase__ ( self) -> Dict:
        # Dry-run model-card generation and check the language-pair metadata.
        __UpperCamelCase , __UpperCamelCase :List[str] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=__lowercase)
        assert mmeta["long_pair"] == "heb-eng"
| 43
|
from __future__ import annotations
from PIL import Image
# Define glider example
__lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next Game of Life generation.

    Fixes the mangled identifiers of the original: the parameter and the
    per-row accumulator were renamed so that the body referenced undefined
    names (`cells`, the row list). The parameter is positional-compatible.

    Args:
        cells: rectangular grid of 0 (dead) / 1 (alive) cells.

    Returns:
        A new grid of the same shape, advanced by one generation.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of cell (i, j), clamping at the borders.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other cells die or stay dead in the next generation.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def lowerCamelCase(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive Game of Life generations as greyscale PIL images.

    Fixes the mangled identifiers of the original: `images`, `img`, `pixels`,
    `colour` and the `cells` parameter were all renamed so the body referenced
    undefined names. Parameters remain positional-compatible.

    Args:
        cells: initial rectangular grid of 0/1 cells.
        frames: number of generations (one image each) to render.

    Returns:
        List of PIL images, one per generation.
    """
    images = []
    for _ in range(frames):
        # Create output image sized (width, height) from the grid shape.
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image: live cells paint black (0), dead cells white (255).
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        images.append(img)
        # NOTE(review): relies on a module-level `new_generation`; in this mangled
        # file the generation-step function is named `lowerCamelCase` — confirm binding.
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Render 16 generations of the glider and save them as an animated GIF.
    # NOTE(review): `generate_images`, `GLIDER` and `images` presumably refer to
    # the mangled `lowerCamelCase` / `__lowercase` bindings above — confirm.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 43
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Tokenizer test suite for MVP (slow and fast tokenizers), based on a tiny BPE vocab.

    NOTE(review): identifiers were mangled (``__UpperCamelCase`` targets); names
    read later (``vocab_tokens``, ``merges``, ``batch``, ``targets`` ...)
    presumably refer to the mangled assignments above them — confirm against the
    upstream test file.
    """
    a__ : Dict = MvpTokenizer
    a__ : str = MvpTokenizerFast
    a__ : Any = True
    a__ : Optional[int] = filter_roberta_detectors
    def UpperCamelCase__ ( self) -> List[str]:
        """Write a minimal BPE vocab/merges pair into the test temp dir."""
        super().setUp()
        __UpperCamelCase :str = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        __UpperCamelCase :int = dict(zip(__lowercase , range(len(__lowercase))))
        __UpperCamelCase :Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __UpperCamelCase :List[str] = {'''unk_token''': '''<unk>'''}
        __UpperCamelCase :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        __UpperCamelCase :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(__lowercase) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(__lowercase))
    def UpperCamelCase__ ( self , **__lowercase) -> Optional[int]:
        # Slow tokenizer factory used by the common tests.
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase)
    def UpperCamelCase__ ( self , **__lowercase) -> Union[str, Any]:
        # Fast (Rust) tokenizer factory used by the common tests.
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase)
    def UpperCamelCase__ ( self , __lowercase) -> str:
        # Input/expected-output pair for the common round-trip tests.
        return "lower newer", "lower newer"
    @cached_property
    def UpperCamelCase__ ( self) -> Optional[Any]:
        return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''')
    @cached_property
    def UpperCamelCase__ ( self) -> Any:
        return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''')
    @require_torch
    def UpperCamelCase__ ( self) -> Any:
        """Batched encoding produces the expected ids, shapes and attention mask."""
        __UpperCamelCase :Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        __UpperCamelCase :Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __UpperCamelCase :Tuple = tokenizer(__lowercase , max_length=len(__lowercase) , padding=__lowercase , return_tensors='''pt''')
            self.assertIsInstance(__lowercase , __lowercase)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            __UpperCamelCase :List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(__lowercase , __lowercase)
            # Test that special tokens are reset
    @require_torch
    def UpperCamelCase__ ( self) -> Tuple:
        """Plain call without targets returns input_ids/attention_mask only."""
        __UpperCamelCase :List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __UpperCamelCase :int = tokenizer(__lowercase , padding=__lowercase , return_tensors='''pt''')
            # check if input_ids are returned and no labels
            self.assertIn('''input_ids''' , __lowercase)
            self.assertIn('''attention_mask''' , __lowercase)
            self.assertNotIn('''labels''' , __lowercase)
            self.assertNotIn('''decoder_attention_mask''' , __lowercase)
    @require_torch
    def UpperCamelCase__ ( self) -> Dict:
        """`text_target` encoding honours max_length padding."""
        __UpperCamelCase :List[Any] = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __UpperCamelCase :Union[str, Any] = tokenizer(text_target=__lowercase , max_length=32 , padding='''max_length''' , return_tensors='''pt''')
            self.assertEqual(32 , targets['''input_ids'''].shape[1])
    @require_torch
    def UpperCamelCase__ ( self) -> List[str]:
        """Over-long inputs are truncated to the model max length (1024)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __UpperCamelCase :Union[str, Any] = tokenizer(
                ['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=__lowercase , truncation=__lowercase , return_tensors='''pt''')
            self.assertIsInstance(__lowercase , __lowercase)
            self.assertEqual(batch.input_ids.shape , (2, 1_024))
    @require_torch
    def UpperCamelCase__ ( self) -> Tuple:
        """Both inputs and labels are wrapped in BOS/EOS special tokens."""
        __UpperCamelCase :List[Any] = ['''A long paragraph for summarization.''']
        __UpperCamelCase :List[Any] = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __UpperCamelCase :List[Any] = tokenizer(__lowercase , text_target=__lowercase , return_tensors='''pt''')
            __UpperCamelCase :Tuple = inputs['''input_ids''']
            __UpperCamelCase :Optional[int] = inputs['''labels''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def UpperCamelCase__ ( self) -> Optional[int]:
        # Intentionally disabled common test.
        pass
    def UpperCamelCase__ ( self) -> Tuple:
        """Slow and fast tokenizers agree on special-token handling around <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                __UpperCamelCase :List[Any] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase)
                __UpperCamelCase :int = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase)
                __UpperCamelCase :str = '''A, <mask> AllenNLP sentence.'''
                __UpperCamelCase :List[Any] = tokenizer_r.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase)
                __UpperCamelCase :Union[str, Any] = tokenizer_p.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
                __UpperCamelCase :Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                __UpperCamelCase :Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    __lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    __lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 43
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCamelCase(key):
    """Rename a PyTorch parameter key to Flax convention: ``layers.0`` -> ``layers_0``.

    Fixes the mangled original, which passed the same name as both pattern and
    string to ``re.findall`` and referenced an undefined ``key``.

    Args:
        key: dot-separated parameter name (e.g. ``"encoder.layers.0.weight"``).

    Returns:
        The key with every ``name.<digits>`` segment rewritten as ``name_<digits>``.
    """
    pattern = r"\w+[.]\d+"
    # Each match is a "<word>.<index>" segment whose dot must become an underscore.
    for pat in re.findall(pattern, key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def lowerCamelCase(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key tuple to Flax convention, reshaping the tensor when needed.

    Fixes the mangled original, whose three parameters shared one name (a
    SyntaxError) while the body read ``pt_tuple_key`` / ``pt_tensor`` /
    ``random_flax_state_dict`` that were never bound.

    Args:
        pt_tuple_key: PyTorch parameter name split into a tuple of components.
        pt_tensor: the parameter value (anything with ``.ndim``, ``.transpose`` and ``.T``).
        random_flax_state_dict: flattened Flax params used to decide the target name.

    Returns:
        ``(renamed_key_tuple, possibly_reshaped_tensor)``.
    """
    # layer-norm bias stored under "scale" on the Flax side
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor
    # embedding: "weight" -> "embedding", no reshape
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch (O, I, H, W) -> Flax (H, W, I, O)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: transpose the 2-D weight matrix
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # anything else passes through unchanged
    return pt_tuple_key, pt_tensor
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=42 ):
    """Convert a PyTorch state dict into a Flax parameter tree for ``flax_model``.

    NOTE(review): identifiers were mangled (duplicate ``SCREAMING_SNAKE_CASE``
    parameters, ``__UpperCamelCase`` targets); names read later
    (``pt_state_dict``, ``flax_model``, ``random_flax_state_dict``,
    ``flax_state_dict`` ...) presumably refer to the assignments above them —
    confirm against the upstream module. The third argument is a PRNG seed
    (default 42) used to initialise random Flax weights for shape checking.
    """
    __UpperCamelCase :Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    __UpperCamelCase :str = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE ) )
    __UpperCamelCase :int = flatten_dict(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        __UpperCamelCase :List[Any] = rename_key(SCREAMING_SNAKE_CASE )
        __UpperCamelCase :List[Any] = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        __UpperCamelCase , __UpperCamelCase :Any = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        if flax_key in random_flax_state_dict:
            # Shapes must match the randomly initialised Flax params exactly.
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        __UpperCamelCase :str = jnp.asarray(SCREAMING_SNAKE_CASE )
    return unflatten_dict(SCREAMING_SNAKE_CASE )
| 43
| 1
|
from __future__ import annotations
from random import choice
def lowerCamelCase(SCREAMING_SNAKE_CASE):
    """Return one element of the given non-empty sequence, chosen uniformly at random."""
    pivot = choice(SCREAMING_SNAKE_CASE)
    return pivot
def lowerCamelCase(lst, k):
    """Return the k-th smallest element (1-indexed) of ``lst`` using randomised quickselect.

    Fixes three defects of the mangled original:
    - both parameters shared the name ``SCREAMING_SNAKE_CASE`` (a SyntaxError);
    - it called undefined names ``random_pivot`` / ``kth_number`` (the pivot is
      now drawn inline with ``choice`` and the recursion is self-referential);
    - elements equal to the pivot (other than the pivot itself) were silently
      dropped, giving wrong answers on lists with duplicates.

    Args:
        lst: non-empty list of comparable elements.
        k: 1-based rank to select, with 1 <= k <= len(lst).

    Returns:
        The element that would appear at index k-1 in sorted(lst).
    """
    pivot = choice(lst)
    # Partition in linear time; count pivot-equal elements instead of dropping them.
    smaller = [e for e in lst if e < pivot]
    bigger = [e for e in lst if e > pivot]
    n_equal = len(lst) - len(smaller) - len(bigger)
    if len(smaller) < k <= len(smaller) + n_equal:
        # k falls inside the run of pivot-equal elements.
        return pivot
    if k <= len(smaller):
        # k-th smallest lies among the strictly smaller elements.
        return lowerCamelCase(smaller, k)
    # Otherwise it lies among the strictly bigger elements; shift the rank.
    return lowerCamelCase(bigger, k - len(smaller) - n_equal)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 43
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Build an ALBERT PyTorch model from a JSON config, load TF checkpoint weights, and save the state dict.

    NOTE(review): identifiers were mangled; ``config`` and ``model`` read below
    presumably refer to the two ``__UpperCamelCase`` assignments — confirm
    against the upstream conversion script.
    """
    __UpperCamelCase :List[Any] = AlbertConfig.from_json_file(SCREAMING_SNAKE_CASE )
    print(f"""Building PyTorch model from configuration: {config}""" )
    __UpperCamelCase :List[str] = AlbertForPreTraining(SCREAMING_SNAKE_CASE )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # Command-line entry point: parse TF checkpoint / config / output paths and convert.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--albert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained ALBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    # NOTE(review): `parser` / `convert_tf_checkpoint_to_pytorch` presumably refer
    # to the mangled `__lowercase` / `lowerCamelCase` bindings above — confirm.
    __lowercase = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
| 1
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
    """Model tester that builds small ViTMAE configs/inputs and checks TF model output shapes.

    NOTE(review): constructor assignments were mangled into ``__UpperCamelCase``
    locals; methods later read ``self.batch_size``, ``self.image_size`` etc. —
    confirm the assignments should target ``self``.
    """
    def __init__( self , __lowercase , __lowercase=13 , __lowercase=30 , __lowercase=2 , __lowercase=3 , __lowercase=True , __lowercase=True , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=10 , __lowercase=0.02 , __lowercase=3 , __lowercase=0.6 , __lowercase=None , ) -> Tuple:
        __UpperCamelCase :List[str] = parent
        __UpperCamelCase :List[Any] = batch_size
        __UpperCamelCase :str = image_size
        __UpperCamelCase :List[Any] = patch_size
        __UpperCamelCase :List[str] = num_channels
        __UpperCamelCase :Union[str, Any] = is_training
        __UpperCamelCase :List[str] = use_labels
        __UpperCamelCase :Tuple = hidden_size
        __UpperCamelCase :str = num_hidden_layers
        __UpperCamelCase :List[Any] = num_attention_heads
        __UpperCamelCase :Optional[Any] = intermediate_size
        __UpperCamelCase :List[str] = hidden_act
        __UpperCamelCase :str = hidden_dropout_prob
        __UpperCamelCase :List[str] = attention_probs_dropout_prob
        __UpperCamelCase :Union[str, Any] = type_sequence_label_size
        __UpperCamelCase :List[str] = initializer_range
        __UpperCamelCase :Optional[int] = mask_ratio
        __UpperCamelCase :Optional[int] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        __UpperCamelCase :Optional[Any] = (image_size // patch_size) ** 2
        __UpperCamelCase :Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        """Build random pixel values (and optional labels) plus a config."""
        __UpperCamelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __UpperCamelCase :Tuple = None
        if self.use_labels:
            __UpperCamelCase :str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        __UpperCamelCase :List[Any] = self.get_config()
        return config, pixel_values, labels
    def UpperCamelCase__ ( self) -> Tuple:
        """Return a small ViTMAEConfig mirroring the tester's hyper-parameters."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Any:
        """Check the base model's last_hidden_state shape."""
        __UpperCamelCase :Any = TFViTMAEModel(config=__lowercase)
        __UpperCamelCase :Union[str, Any] = model(__lowercase , training=__lowercase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[str]:
        """Check pretraining logits shapes, including the single-channel (greyscale) case."""
        __UpperCamelCase :str = TFViTMAEForPreTraining(__lowercase)
        __UpperCamelCase :str = model(__lowercase , training=__lowercase)
        # expected sequence length = num_patches
        __UpperCamelCase :List[str] = (self.image_size // self.patch_size) ** 2
        __UpperCamelCase :Union[str, Any] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        __UpperCamelCase :List[str] = 1
        __UpperCamelCase :List[str] = TFViTMAEForPreTraining(__lowercase)
        __UpperCamelCase :int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __UpperCamelCase :Dict = model(__lowercase , training=__lowercase)
        __UpperCamelCase :List[str] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
    def UpperCamelCase__ ( self) -> List[Any]:
        """Package config and inputs as (config, inputs_dict) for the common tests."""
        __UpperCamelCase :Optional[int] = self.prepare_config_and_inputs()
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) :List[str] = config_and_inputs
        __UpperCamelCase :Dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast model tests for TF ViTMAE (TFViTMAEModel / TFViTMAEForPreTraining).

    NOTE(review): this block looks machine-rewritten — every method is named
    `UpperCamelCase__` (so later defs shadow earlier ones) and most locals are
    bound to the placeholder `__UpperCamelCase` and never read back. Confirm
    against upstream `tests/models/vit_mae/test_modeling_tf_vit_mae.py`.
    """
    # Model classes under test / pipeline mapping (empty when TF is unavailable).
    a__ : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    a__ : Dict = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
    # Feature flags for the common test mixin (semantics not visible here).
    a__ : Tuple = False
    a__ : str = False
    a__ : Optional[Any] = False
    a__ : Union[str, Any] = False
    # setUp: build the model tester and a ConfigTester (hidden_size=37).
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        __UpperCamelCase :List[str] = TFViTMAEModelTester(self)
        __UpperCamelCase :List[str] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37)
    # Run the shared config sanity checks.
    def UpperCamelCase__ ( self) -> Tuple:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViTMAE does not use inputs_embeds''')
    def UpperCamelCase__ ( self) -> str:
        pass
    # Check input/output embedding layer types for every model class.
    def UpperCamelCase__ ( self) -> Any:
        __UpperCamelCase , __UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase :List[Any] = model_class(__lowercase)
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
            __UpperCamelCase :Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowercase , tf.keras.layers.Layer))
    # Check that the first argument of model.call is `pixel_values`.
    def UpperCamelCase__ ( self) -> Dict:
        __UpperCamelCase , __UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase :Tuple = model_class(__lowercase)
            __UpperCamelCase :int = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
            __UpperCamelCase :Any = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase)
    # Delegate to the model tester's shape checks.
    def UpperCamelCase__ ( self) -> Any:
        __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase)
    def UpperCamelCase__ ( self) -> List[Any]:
        __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__lowercase)
    # Positional vs keyword invocation should give identical outputs.
    def UpperCamelCase__ ( self) -> Optional[Any]:
        # make the mask reproducible
        np.random.seed(2)
        __UpperCamelCase , __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase :Tuple = int((config.image_size // config.patch_size) ** 2)
        __UpperCamelCase :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            __UpperCamelCase :str = model_class(__lowercase)
            __UpperCamelCase :Optional[int] = self._prepare_for_class(__lowercase , __lowercase)
            __UpperCamelCase :Dict = model(__lowercase , noise=__lowercase)
            __UpperCamelCase :int = copy.deepcopy(self._prepare_for_class(__lowercase , __lowercase))
            __UpperCamelCase :Union[str, Any] = model(**__lowercase , noise=__lowercase)
            __UpperCamelCase :Tuple = outputs_dict[0].numpy()
            __UpperCamelCase :Union[str, Any] = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1E-6)
    # NumPy inputs should behave the same as TF tensors.
    def UpperCamelCase__ ( self) -> Optional[int]:
        # make the mask reproducible
        np.random.seed(2)
        __UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase :int = int((config.image_size // config.patch_size) ** 2)
        __UpperCamelCase :str = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        # Convert a dict of TF tensors to plain numpy arrays.
        def prepare_numpy_arrays(__lowercase):
            __UpperCamelCase :Optional[int] = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(__lowercase):
                    __UpperCamelCase :Optional[Any] = v.numpy()
                else:
                    __UpperCamelCase :Optional[int] = np.array(__lowercase)
            return inputs_np_dict
        for model_class in self.all_model_classes:
            __UpperCamelCase :int = model_class(__lowercase)
            __UpperCamelCase :Tuple = self._prepare_for_class(__lowercase , __lowercase)
            __UpperCamelCase :Any = prepare_numpy_arrays(__lowercase)
            __UpperCamelCase :Any = model(__lowercase , noise=__lowercase)
            __UpperCamelCase :Tuple = model(**__lowercase , noise=__lowercase)
            self.assert_outputs_same(__lowercase , __lowercase)
    # Override the PT/TF equivalence hook to feed both models the same noise.
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
        # make masks reproducible
        np.random.seed(2)
        __UpperCamelCase :Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        __UpperCamelCase :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        __UpperCamelCase :Dict = tf.constant(__lowercase)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        __UpperCamelCase :Any = tf_noise
        super().check_pt_tf_models(__lowercase , __lowercase , __lowercase)
    # Keras save/load round-trip for the serializable MainLayer classes.
    def UpperCamelCase__ ( self) -> Tuple:
        # make mask reproducible
        np.random.seed(2)
        __UpperCamelCase , __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase :Optional[int] = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(__lowercase)
            if module_member_name.endswith('''MainLayer''')
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('''MainLayer''')] == model_class.__name__[: -len('''Model''')]
            for module_member in (getattr(__lowercase , __lowercase),)
            if isinstance(__lowercase , __lowercase)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(__lowercase , '''_keras_serializable''' , __lowercase)
        }
        __UpperCamelCase :Union[str, Any] = int((config.image_size // config.patch_size) ** 2)
        __UpperCamelCase :List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        __UpperCamelCase :str = tf.convert_to_tensor(__lowercase)
        inputs_dict.update({'''noise''': noise})
        for main_layer_class in tf_main_layer_classes:
            __UpperCamelCase :Optional[int] = main_layer_class(__lowercase)
            __UpperCamelCase :Optional[Any] = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            __UpperCamelCase :Dict = tf.keras.Model(__lowercase , outputs=main_layer(__lowercase))
            __UpperCamelCase :str = model(__lowercase)
            with tempfile.TemporaryDirectory() as tmpdirname:
                __UpperCamelCase :str = os.path.join(__lowercase , '''keras_model.h5''')
                model.save(__lowercase)
                __UpperCamelCase :List[Any] = tf.keras.models.load_model(
                    __lowercase , custom_objects={main_layer_class.__name__: main_layer_class})
                assert isinstance(__lowercase , tf.keras.Model)
                __UpperCamelCase :Optional[Any] = model(__lowercase)
                self.assert_outputs_same(__lowercase , __lowercase)
    # save_pretrained/from_pretrained round-trip with fixed noise.
    @slow
    def UpperCamelCase__ ( self) -> Dict:
        # make mask reproducible
        np.random.seed(2)
        __UpperCamelCase , __UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase :Optional[Any] = int((config.image_size // config.patch_size) ** 2)
        __UpperCamelCase :Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            __UpperCamelCase :Optional[int] = model_class(__lowercase)
            __UpperCamelCase :Union[str, Any] = self._prepare_for_class(__lowercase , __lowercase)
            __UpperCamelCase :Optional[int] = model(__lowercase , noise=__lowercase)
            if model_class.__name__ == "TFViTMAEModel":
                __UpperCamelCase :Any = outputs.last_hidden_state.numpy()
                __UpperCamelCase :Optional[Any] = 0
            else:
                __UpperCamelCase :List[str] = outputs.logits.numpy()
                __UpperCamelCase :Optional[int] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__lowercase , saved_model=__lowercase)
                __UpperCamelCase :Optional[int] = model_class.from_pretrained(__lowercase)
                __UpperCamelCase :List[str] = model(__lowercase , noise=__lowercase)
                if model_class.__name__ == "TFViTMAEModel":
                    __UpperCamelCase :List[Any] = after_outputs['''last_hidden_state'''].numpy()
                    __UpperCamelCase :List[Any] = 0
                else:
                    __UpperCamelCase :Any = after_outputs['''logits'''].numpy()
                    __UpperCamelCase :Tuple = 0
            __UpperCamelCase :Any = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(__lowercase , 1E-5)
    # get_config/from_config round-trip; config must stay JSON-serializable.
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # make mask reproducible
        np.random.seed(2)
        __UpperCamelCase , __UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase :str = int((config.image_size // config.patch_size) ** 2)
        __UpperCamelCase :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            __UpperCamelCase :Tuple = model_class(__lowercase)
            __UpperCamelCase :Any = self._prepare_for_class(__lowercase , __lowercase)
            __UpperCamelCase :Tuple = model(__lowercase , noise=__lowercase)
            __UpperCamelCase :List[Any] = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(__lowercase)
            __UpperCamelCase :Optional[Any] = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            __UpperCamelCase :Any = model_class.from_config(model.config)
            __UpperCamelCase :List[Any] = new_model(__lowercase) # Build model
            new_model.set_weights(model.get_weights())
            __UpperCamelCase :str = new_model(__lowercase , noise=__lowercase)
            self.assert_outputs_same(__lowercase , __lowercase)
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def UpperCamelCase__ ( self) -> Dict:
        pass
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''')
    def UpperCamelCase__ ( self) -> Any:
        pass
    # Smoke-test loading pretrained weights from the Hub.
    @slow
    def UpperCamelCase__ ( self) -> Any:
        __UpperCamelCase :List[Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(__lowercase)
def lowerCamelCase ( ):
    """Load the standard COCO fixture image used by the integration tests below."""
    # NOTE(review): the original bound the opened image to a throwaway
    # placeholder and then returned the undefined name `image` (NameError).
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test: TFViTMAEForPreTraining logits on a real COCO image.

    NOTE(review): locals are bound to placeholder names (machine rewrite) and
    then read back under their original names — confirm against upstream
    `test_modeling_tf_vit_mae.py`.
    """
    # Image processor for facebook/vit-mae-base, or None without vision deps.
    @cached_property
    def UpperCamelCase__ ( self) -> Optional[Any]:
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None
    @slow
    def UpperCamelCase__ ( self) -> List[str]:
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        __UpperCamelCase :Optional[Any] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''')
        __UpperCamelCase :Optional[int] = self.default_image_processor
        __UpperCamelCase :Optional[int] = prepare_img()
        __UpperCamelCase :Optional[int] = image_processor(images=__lowercase , return_tensors='''tf''')
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        __UpperCamelCase :Union[str, Any] = ViTMAEConfig()
        __UpperCamelCase :Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        __UpperCamelCase :Tuple = np.random.uniform(size=(1, num_patches))
        # forward pass
        __UpperCamelCase :int = model(**__lowercase , noise=__lowercase)
        # verify the logits
        __UpperCamelCase :Optional[int] = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape , __lowercase)
        __UpperCamelCase :List[Any] = tf.convert_to_tensor(
            [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowercase , atol=1E-4)
| 43
|
import math
import qiskit
def lowerCamelCase ( input_a = 1 , input_b = 1 , carry_in = 1 ):
    """Build and simulate a 4-qubit quantum full adder.

    Each input may be 0 or 1 (classical bit) or 2 (place the qubit in
    superposition via a Hadamard gate).

    Returns:
        dict mapping 2-bit measurement strings (sum, carry-out) to shot counts
        over 1_000 simulator shots.

    Raises:
        TypeError: if any input is a str.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    # NOTE(review): the original signature declared three identically named
    # parameters (a SyntaxError), compared each value against itself in
    # `isinstance`, and referenced the undefined `input_a`; restored distinct
    # names and the str-type check implied by the error message below.
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: 4 qubits (a, b, carry-in, ancilla), 2 classical bits
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )


if __name__ == "__main__":
    # NOTE(review): the guard previously called the undefined name
    # `quantum_full_adder`; the adder above is defined as `lowerCamelCase`.
    print(F'Total sum count for state is: {lowerCamelCase(1, 1, 1)}')
| 43
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
# NOTE(review): the parser builder below reads `_description`, which the
# machine rewrite renamed to `__lowercase`; bind both names so either works.
__lowercase = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
_description = __lowercase
def lowerCamelCase ( subparsers=None ):
    """Build the `accelerate tpu-config` argument parser.

    When *subparsers* (an argparse subparsers action) is given, the parser is
    registered as the `tpu-config` subcommand; otherwise a standalone
    ArgumentParser is returned.
    """
    # NOTE(review): the original bound the parser to a throwaway placeholder
    # and then read the undefined names `parser` / `subparsers` (NameError);
    # real bindings restored below.
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        # NOTE(review): upstream wires the launcher function here; in this
        # rewritten file the launcher below is also named `lowerCamelCase` —
        # confirm the intended target before relying on the subcommand path.
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def lowerCamelCase ( args ):
    """Run the configured startup commands on every worker of a TPU VM pod via `gcloud`.

    Fills unset CLI options from the accelerate config file, normalizes the
    command list, then either prints the gcloud invocation (--debug) or runs it.
    """
    # NOTE(review): the original bound every value to a placeholder name and
    # then read undefined names (`defaults`, `new_cmd`, `cmd`), joined/ran the
    # args namespace itself, and called os.path.isfile on the namespace;
    # restored the intended targets (default_config_file / new_cmd / cmd).
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"""Running {' '.join(cmd )}""" )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def lowerCamelCase ( ):
    """CLI entry point: build the tpu-config parser, parse argv, and launch."""
    # NOTE(review): the original parsed into a placeholder name and then read
    # the undefined name `parser` (NameError); also note that in this
    # rewritten file the parser/launcher above are both named `lowerCamelCase`
    # rather than the upstream names referenced here — confirm the targets.
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
| 43
|
import random
def lowerCamelCase ( a , left_index , right_index ):
    """Lomuto-style partition of a[left_index:right_index] around pivot
    a[left_index]; rearranges the slice in place and returns the pivot's
    final index.

    NOTE(review): the original declared three identically named parameters
    (a SyntaxError) and used annotated tuple-assignment targets (also
    invalid syntax); the swaps below restore what the right-hand sides imply.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Move the pivot between the smaller and not-smaller halves.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def _randomized_quick_sort_partition ( a , left_index , right_index ):
    # Lomuto partition around a[left_index]; returns the pivot's final index.
    # Private helper so this function is self-contained — the module-level
    # partition above carries the same logic under a rewritten name.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def lowerCamelCase ( a , left , right ):
    """Sort a[left:right] (right exclusive) in place via randomized quicksort.

    NOTE(review): the original declared three identically named parameters and
    an annotated tuple-assignment (both SyntaxErrors), and recursed through
    the undefined name `quick_sort_random`; fixed to recurse on itself.
    """
    if left < right:
        pivot = random.randint(left , right - 1 )
        # switches the randomly chosen pivot with the left most bound
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = _randomized_quick_sort_partition(a , left , right )
        # recursive quicksort to the left of the pivot point
        lowerCamelCase(a , left , pivot_index )
        # recursive quicksort to the right of the pivot point
        lowerCamelCase(a , pivot_index + 1 , right )
def lowerCamelCase ( ):
    """Read comma-separated integers from stdin, quicksort them in place, print them."""
    # NOTE(review): the original bound input to a placeholder and then read
    # the undefined names `user_input` / `SCREAMING_SNAKE_CASE`; also note the
    # sorter above is defined as `lowerCamelCase` in this rewritten file while
    # upstream names it `quick_sort_random` — confirm the call target.
    user_input = input('''Enter numbers separated by a comma:\n''' ).strip()
    unsorted = [int(item ) for item in user_input.split(''',''' )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )


if __name__ == "__main__":
    # NOTE(review): the guard previously called the undefined name `main`;
    # the entry point above is defined as `lowerCamelCase`.
    lowerCamelCase()
| 43
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# NOTE(review): module-level flag carried over from the original diffusers test
# file; its consumer is not visible in this chunk — confirm against upstream.
__lowercase = False
class lowerCamelCase_ ( unittest.TestCase ):
    """Fast CPU tests for VQDiffusionPipeline with tiny dummy components.

    NOTE(review): this block looks machine-rewritten — every method is named
    `UpperCamelCase__` (later defs shadow earlier ones) and locals are bound
    to the placeholder `__UpperCamelCase` then read back under their original
    names. Confirm against upstream `test_vq_diffusion.py`.
    """
    def UpperCamelCase__ ( self) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # num_embed used by the dummy VQ model and scheduler.
    @property
    def UpperCamelCase__ ( self) -> Optional[Any]:
        return 12
    @property
    def UpperCamelCase__ ( self) -> int:
        return 12
    @property
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        return 32
    # Tiny VQ-VAE for fast tests.
    @property
    def UpperCamelCase__ ( self) -> Dict:
        torch.manual_seed(0)
        __UpperCamelCase :List[str] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def UpperCamelCase__ ( self) -> int:
        __UpperCamelCase :Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer
    # Tiny CLIP text encoder.
    @property
    def UpperCamelCase__ ( self) -> Dict:
        torch.manual_seed(0)
        __UpperCamelCase :Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(__lowercase)
    # Tiny transformer backbone for the diffusion pipeline.
    @property
    def UpperCamelCase__ ( self) -> List[Any]:
        torch.manual_seed(0)
        __UpperCamelCase :List[str] = 12
        __UpperCamelCase :str = 12
        __UpperCamelCase :Optional[int] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        __UpperCamelCase :int = TransformeraDModel(**__lowercase)
        return model
    # End-to-end pipeline run without learnable classifier-free embeddings.
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        __UpperCamelCase :List[str] = '''cpu'''
        __UpperCamelCase :Optional[Any] = self.dummy_vqvae
        __UpperCamelCase :Tuple = self.dummy_text_encoder
        __UpperCamelCase :Any = self.dummy_tokenizer
        __UpperCamelCase :Any = self.dummy_transformer
        __UpperCamelCase :int = VQDiffusionScheduler(self.num_embed)
        __UpperCamelCase :Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase)
        __UpperCamelCase :Union[str, Any] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        __UpperCamelCase :Dict = pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Any = '''teddy bear playing in the pool'''
        __UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(0)
        __UpperCamelCase :Any = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''')
        __UpperCamelCase :str = output.images
        __UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(0)
        __UpperCamelCase :Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2)[0]
        __UpperCamelCase :Dict = image[0, -3:, -3:, -1]
        __UpperCamelCase :int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __UpperCamelCase :str = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    # Same run but with learnable classifier-free sampling embeddings.
    def UpperCamelCase__ ( self) -> Tuple:
        __UpperCamelCase :int = '''cpu'''
        __UpperCamelCase :Any = self.dummy_vqvae
        __UpperCamelCase :Optional[int] = self.dummy_text_encoder
        __UpperCamelCase :Union[str, Any] = self.dummy_tokenizer
        __UpperCamelCase :Dict = self.dummy_transformer
        __UpperCamelCase :List[str] = VQDiffusionScheduler(self.num_embed)
        __UpperCamelCase :Dict = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
        __UpperCamelCase :List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        __UpperCamelCase :Optional[int] = pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Optional[Any] = '''teddy bear playing in the pool'''
        __UpperCamelCase :Any = torch.Generator(device=__lowercase).manual_seed(0)
        __UpperCamelCase :str = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''')
        __UpperCamelCase :Union[str, Any] = output.images
        __UpperCamelCase :Any = torch.Generator(device=__lowercase).manual_seed(0)
        __UpperCamelCase :Tuple = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2)[0]
        __UpperCamelCase :str = image[0, -3:, -3:, -1]
        __UpperCamelCase :Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __UpperCamelCase :Optional[int] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration test: full microsoft/vq-diffusion-ithq pipeline
    compared against a reference image.

    NOTE(review): locals are bound to placeholder names (machine rewrite) and
    read back under their original names — confirm against upstream.
    """
    def UpperCamelCase__ ( self) -> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase__ ( self) -> Optional[Any]:
        __UpperCamelCase :Optional[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''')
        __UpperCamelCase :List[Any] = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''')
        __UpperCamelCase :str = pipeline.to(__lowercase)
        pipeline.set_progress_bar_config(disable=__lowercase)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __UpperCamelCase :Union[str, Any] = torch.Generator(device=__lowercase).manual_seed(0)
        __UpperCamelCase :Tuple = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        __UpperCamelCase :Optional[int] = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 43
|
def lowerCamelCase ( numerator = 1 , digit = 1_000 ):
    """Return the denominator d in [numerator, digit] whose unit fraction
    numerator/d has the longest recurring cycle in its decimal expansion
    (Project Euler problem 26 style search).

    >>> lowerCamelCase(1, 10)
    7
    """
    # NOTE(review): the original declared two identically named default
    # parameters (a SyntaxError) and bound the accumulators to placeholder
    # names while reading `the_digit` / `longest_list_length`; restored
    # real bindings.
    the_digit = 1            # best denominator found so far
    longest_list_length = 0  # longest cycle length seen so far
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                # Repeated remainder => cycle found; its length is the number
                # of distinct remainders stored so far.
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
    import doctest
    # Run any doctest examples embedded in this module's docstrings.
    doctest.testmod()
| 43
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase) -> Optional[int]:
__UpperCamelCase :Any = params
__UpperCamelCase :Optional[int] = np.array(__lowercase)
__UpperCamelCase :Union[str, Any] = np.array([len(__lowercase) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowercase) -> Union[str, Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self) -> Optional[Any]:
return len(self.lengths)
def UpperCamelCase__ ( self) -> List[Any]:
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Optional[Any] = self.params.max_model_input_size
__UpperCamelCase :int = self.lengths > max_len
logger.info(f"""Splitting {sum(__lowercase)} too long sequences.""")
def divide_chunks(__lowercase , __lowercase):
return [l[i : i + n] for i in range(0 , len(__lowercase) , __lowercase)]
__UpperCamelCase :str = []
__UpperCamelCase :Union[str, Any] = []
if self.params.mlm:
__UpperCamelCase , __UpperCamelCase :Any = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
__UpperCamelCase , __UpperCamelCase :str = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
__UpperCamelCase :Any = []
for sub_s in divide_chunks(seq_ , max_len - 2):
if sub_s[0] != cls_id:
__UpperCamelCase :Dict = np.insert(__lowercase , 0 , __lowercase)
if sub_s[-1] != sep_id:
__UpperCamelCase :Optional[int] = np.insert(__lowercase , len(__lowercase) , __lowercase)
assert len(__lowercase) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowercase)
new_tok_ids.extend(__lowercase)
new_lengths.extend([len(__lowercase) for l in sub_seqs])
__UpperCamelCase :str = np.array(__lowercase)
__UpperCamelCase :str = np.array(__lowercase)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Union[str, Any] = len(self)
__UpperCamelCase :List[Any] = self.lengths > 11
__UpperCamelCase :Optional[Any] = self.token_ids[indices]
__UpperCamelCase :List[str] = self.lengths[indices]
__UpperCamelCase :Optional[Any] = len(self)
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")
def UpperCamelCase__ ( self) -> Union[str, Any]:
if "unk_token" not in self.params.special_tok_ids:
return
else:
__UpperCamelCase :Dict = self.params.special_tok_ids['''unk_token''']
__UpperCamelCase :int = len(self)
__UpperCamelCase :str = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
__UpperCamelCase :Dict = (unk_occs / self.lengths) < 0.5
__UpperCamelCase :str = self.token_ids[indices]
__UpperCamelCase :int = self.lengths[indices]
__UpperCamelCase :List[Any] = len(self)
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")
def UpperCamelCase__ ( self) -> int:
if not self.params.is_master:
return
logger.info(f"""{len(self)} sequences""")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :List[str] = [t[0] for t in batch]
__UpperCamelCase :Optional[int] = [t[1] for t in batch]
assert len(__lowercase) == len(__lowercase)
# Max for paddings
__UpperCamelCase :List[Any] = max(__lowercase)
# Pad token ids
if self.params.mlm:
__UpperCamelCase :Optional[int] = self.params.special_tok_ids['''pad_token''']
else:
__UpperCamelCase :Any = self.params.special_tok_ids['''unk_token''']
__UpperCamelCase :str = [list(t.astype(__lowercase)) + [pad_idx] * (max_seq_len_ - len(__lowercase)) for t in token_ids]
assert len(tk_) == len(__lowercase)
assert all(len(__lowercase) == max_seq_len_ for t in tk_)
__UpperCamelCase :Optional[int] = torch.tensor(tk_) # (bs, max_seq_len_)
__UpperCamelCase :List[Any] = torch.tensor(__lowercase) # (bs)
return tk_t, lg_t
| 43
|
import argparse
import json
from tqdm import tqdm
def main():
    """Parse a raw DPR training dump and write the evaluation-set / gold-data files.

    Name restored from the ``main()`` call in the ``__main__`` guard below; the
    mangled locals (``parser``, ``args``, ``dpr_records`` ...) and the undefined
    ``SCREAMING_SNAKE_CASE`` ``type=`` arguments are repaired.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()

    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            # One question per line in the eval set; tab-joined positive titles in the gold file.
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
if __name__ == "__main__":
    # Script entry point: convert the raw DPR dump given on the command line.
    main()
| 43
| 1
|
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Power iteration: find the dominant eigenvalue/eigenvector of a
    symmetric (or Hermitian) matrix.

    Args:
        input_matrix: square numpy array, symmetric/Hermitian.
        vector: initial guess, same leading dimension as ``input_matrix``.
        error_tol: relative change in the eigenvalue at which to stop.
        max_iterations: hard cap on the number of iterations.

    Returns:
        (eigenvalue, eigenvector) — the largest-magnitude eigenpair.

    The mangled signature repeated the same parameter name four times (a
    SyntaxError) and left every local unbound; real names are restored. The
    function name matches the ``power_iteration(...)`` call in the self-test.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    """Self-test: compare ``power_iteration`` against ``numpy.linalg.eigh`` on a
    real symmetric and a complex Hermitian matrix.

    Fixes: ``np.complexaaa`` (a mangled ``np.complex128``), the annotated tuple
    assignments (SyntaxErrors), and the unbound locals. Name restored from the
    ``test_power_iteration()`` call in the ``__main__`` guard.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])

    # Build a Hermitian complex matrix: real part symmetric, imaginary part
    # antisymmetric (upper triangle minus its transpose).
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: eigh (for symmetric or Hermitian matrices)
        # returns eigenvalues in ascending order, so the last one is the maximum.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are only unique up to a sign, so compare absolute values.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Run the self-test comparing power iteration against numpy.linalg.eigh.
    test_power_iteration()
| 43
|
from __future__ import annotations
import random
# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item, main_target):
    """Score ``item`` against ``main_target``: number of genes in the right place.

    Returns ``(item, score)``. The mangled signature repeated one parameter name
    (SyntaxError); the function name is restored from the ``evaluate(...)`` call
    inside ``basic``.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1, parent_2):
    """Slice both parents at a random point and swap the tails.

    The mangled version built both children from ``parent_a`` alone (a no-op) and
    repeated the parameter name (SyntaxError). Name restored from the
    ``crossover(...)`` call inside ``select``.
    """
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child, genes):
    """With probability ``MUTATION_PROBABILITY``, replace one random gene of
    ``child`` with a random gene from ``genes``.

    The mangled version discarded the ``random.choice`` result, so no mutation
    ever happened; the in-place index assignment is restored (per the upstream
    algorithm — NOTE(review): the exact mutated index expression is reconstructed,
    confirm against the original). Name restored from the ``mutate(...)`` calls
    inside ``select``.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    """Pick random second parents and breed new mutated children from ``parent_1``.

    ``parent_1`` is an ``(item, normalized_score)`` pair. Fixes the duplicated
    parameter names (SyntaxError) and the unbound locals; name restored from the
    ``select(...)`` call inside ``basic``.
    """
    pop = []
    # Generate more children proportionally to the fitness score, capped at 10.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target, genes, debug=True):
    """Evolve random strings toward ``target`` using the gene pool ``genes``.

    Returns ``(generation, total_population, best_string)`` once a perfect match
    is found. Fixes the duplicated parameter names (SyntaxError) and the unbound
    locals throughout; name restored from the ``basic(...)`` call in the
    ``__main__`` guard.
    """
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo run: evolve a random population toward the sentence below.
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    # The mangled version bound everything to `__lowercase`, so `basic` never
    # received its arguments and the result was discarded.
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 43
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers every Trainer event it receives, in order.

    Restored from the mangled version in which every hook was named
    ``UpperCamelCase__`` (each definition shadowing the previous one), the
    positional parameters were all ``__lowercase`` (duplicates → SyntaxError),
    and ``self.events`` was never assigned. The class name is grounded by the
    ``MyTestTrainerCallback`` references in the test class below; the hook names
    are grounded by the event strings they append.
    """

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class lowerCamelCase_(unittest.TestCase):
    """Tests for adding/removing Trainer callbacks and for the event flow.

    Method names are restored from the in-class call sites
    (``self.get_trainer``, ``self.check_callbacks_equality``,
    ``self.get_expected_events``); the mangled keyword defaults (all named
    ``__lowercase`` — a SyntaxError) and the never-assigned ``self.output_dir``
    are repaired.
    """

    def setUp(self):
        # Fresh temp dir per test so TrainingArguments can write checkpoints.
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a small regression Trainer with the given callbacks/arguments."""
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the
        # level of logging. We make sure it's set to False since the tests later on
        # depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert that two callback lists contain the same callbacks (classes or
        instances, in any order)."""
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Replay the trainer configuration and return the event sequence a
        recording callback should have observed."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps", )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 43
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both constants were mangled to `__lowercase`, the second
# clobbering the first. Names follow the upstream accelerate example
# (MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE) — confirm, as no visible code in this
# chunk reads them by name.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build the GLUE/MRPC train and eval DataLoaders for the given accelerator.

    Fixes the signature that repeated one parameter name three times
    (SyntaxError) and the unbound locals (``tokenizer``, ``datasets`` ...).
    Name restored from the ``get_dataloaders(...)`` call in the training
    function below.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    # NOTE(review): the upstream example uses EVAL_BATCH_SIZE for the eval loader;
    # the mangled code passed the same (unreadable) name for both, so the function
    # parameter is used for both here — confirm.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate a sequence-classification model under accelerate,
    optionally using DeepSpeed-provided optimizer/scheduler stubs.

    Fixes the duplicated parameter names (SyntaxError), the annotated tuple
    assignments (SyntaxErrors), and the unbound locals throughout. Name restored
    from the ``training_function(config, args)`` call in ``main``.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed supplies its own,
    # in which case accelerate expects the DummyOptim placeholder.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (same real/dummy split as the optimizer).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('''glue''' , '''mrpc''' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
        performance_metric[f"""epoch-{epoch}"""] = eval_metric['''accuracy''']

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
            json.dump(performance_metric, f)
def main():
    """Parse command-line arguments and launch the training run.

    Name restored from the ``main()`` call in the ``__main__`` guard; the
    undefined ``SCREAMING_SNAKE_CASE`` ``type=``/``default=``/``required=``
    values are replaced with the concrete ones.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--performance_lower_bound''' , type=float , default=None , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=3 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point: parse CLI args and run training.
    main()
| 43
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Fast (``tokenizers``-backed) tokenizer for BLOOM checkpoints.

    NOTE(review): this block carries obfuscation damage that a doc-only pass
    cannot repair:
      * the four class attributes are all named ``a__`` — each rebinding
        shadows the previous one, so only the last (``None``) survives;
      * ``__init__`` declares several parameters all named ``__lowercase``,
        which is a SyntaxError (duplicate argument names);
      * all four methods share the name ``UpperCamelCase__``, so only the
        last definition survives on the class;
      * several locals are bound to ``__UpperCamelCase`` and never read,
        while ``pre_tok_state`` / ``add_prefix_space`` / ``pre_tok_class`` /
        ``kwargs`` / ``conversation`` / ``input_ids`` are read unbound.
    The comments below describe the apparent intent of each method.
    '''
    a__ : int = VOCAB_FILES_NAMES               # presumably ``vocab_files_names``
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP    # presumably ``pretrained_vocab_files_map``
    a__ : List[str] = ["""input_ids""", """attention_mask"""]  # presumably ``model_input_names``
    a__ : int = None                            # presumably ``slow_tokenizer_class`` (no slow tokenizer)

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
        # Forward everything to the fast-tokenizer base class, then align the
        # backend pre-tokenizer's add_prefix_space flag with the request.
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        # Inspect the serialized state of the backend pre-tokenizer...
        __UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
            # ...and rebuild it with the corrected ``add_prefix_space`` setting.
            __UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
            __UpperCamelCase :str = add_prefix_space
            __UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
        __UpperCamelCase :Tuple = add_prefix_space

    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        # Batched encoding: pre-tokenized input requires add_prefix_space=True.
        __UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*__lowercase , **__lowercase)

    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        # Single-example encoding: same add_prefix_space guard as above.
        __UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*__lowercase , **__lowercase)

    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        # Save the backend tokenizer model files into a directory.
        __UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
        return tuple(__lowercase)

    def UpperCamelCase__ ( self , __lowercase) -> List[int]:
        # Build input ids for a Conversation: encode each turn, terminate it
        # with EOS, and keep only the trailing ``model_max_length`` tokens.
        __UpperCamelCase :str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
        if len(__lowercase) > self.model_max_length:
            __UpperCamelCase :Any = input_ids[-self.model_max_length :]
        return input_ids
| 43
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Configuration for the Deformable DETR model.

    Holds backbone selection, transformer encoder/decoder sizes, deformable
    attention settings, and Hungarian-matcher / loss coefficients.

    NOTE(review): restored from obfuscated code in which ``model_type`` and
    ``attribute_map`` were both named ``a__`` (the second shadowed the first),
    every ``__init__`` parameter was named ``__lowercase`` (duplicate argument
    names are a SyntaxError), the dict check was ``isinstance(x, x)``, and all
    constructor arguments were discarded into a throwaway local instead of
    being stored on ``self``. Names/order reconstructed from the defaults,
    the body's references, and the ``attribute_map`` entries — confirm.
    '''
    model_type = """deformable_detr"""
    # Map common config attribute names onto this config's field names.
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1_024,
        encoder_layers=6,
        encoder_ffn_dim=1_024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ) -> int:
        # Backbone selection: either a timm backbone (by name) or an explicit
        # Transformers backbone config — never both.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a dict backbone config into its config class.
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # NOTE(review): ``return_intermediate`` is accepted but not stored —
        # kept for signature compatibility; confirm against callers.
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias required by ``attribute_map``.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by ``attribute_map``.
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 43
| 1
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowercase = '''CompVis/stable-diffusion-v1-1'''
__lowercase = '''CompVis/stable-diffusion-v1-2'''
__lowercase = '''CompVis/stable-diffusion-v1-3'''
__lowercase = '''CompVis/stable-diffusion-v1-4'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Comparison pipeline that runs the same prompt through the Stable
    Diffusion v1.1–v1.4 checkpoints and returns the four resulting images.

    NOTE(review): obfuscation has damaged this block beyond a safe rewrite:
      * ``super()._init_()`` is almost certainly a typo for
        ``super().__init__()``;
      * ``__init__`` and every call-style method declare many parameters all
        named ``__lowercase`` — duplicate argument names are a SyntaxError;
      * the four ``from_pretrained`` results are discarded into a reused
        local, yet ``register_modules`` reads ``self.pipea`` which is never
        assigned;
      * all six methods share the name ``UpperCamelCase__``, so only the last
        definition survives, and that one calls ``self.textaimg_sda_a`` which
        does not exist on the class.
    Comments below describe the apparent intent of each method.
    '''
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ) -> Union[str, Any]:
        # Presumably (vae, text_encoder, tokenizer, unet, scheduler,
        # safety_checker, feature_extractor, requires_safety_checker=True).
        super()._init_()
        # Load the three fixed checkpoints and assemble a fourth pipeline
        # from the components passed to this constructor.
        __UpperCamelCase :Dict = StableDiffusionPipeline.from_pretrained(__lowercase)
        __UpperCamelCase :Union[str, Any] = StableDiffusionPipeline.from_pretrained(__lowercase)
        __UpperCamelCase :Optional[int] = StableDiffusionPipeline.from_pretrained(__lowercase)
        __UpperCamelCase :Tuple = StableDiffusionPipeline(
            vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , unet=__lowercase , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , requires_safety_checker=__lowercase , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)

    @property
    def UpperCamelCase__ ( self) -> Dict[str, Any]:
        # Expose the registered sub-pipelines as a name -> module mapping.
        return {k: getattr(self , __lowercase) for k in self.config.keys() if not k.startswith('''_''')}

    def UpperCamelCase__ ( self , __lowercase = "auto") -> str:
        # Enable sliced attention; "auto" halves the attention head size as a
        # speed/memory trade-off.
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __UpperCamelCase :Optional[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__lowercase)

    def UpperCamelCase__ ( self) -> str:
        # Presumably disables attention slicing (upstream passes None here).
        self.enable_attention_slicing(__lowercase)

    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 50 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> Union[str, Any]:
        # Text-to-image with the first checkpoint.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )

    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 50 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> int:
        # Text-to-image with the second checkpoint.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )

    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 50 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> str:
        # Text-to-image with the third checkpoint.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )

    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 50 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> str:
        # Text-to-image with the fourth checkpoint.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )

    @torch.no_grad()
    def UpperCamelCase__ ( self , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 50 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> Optional[int]:
        # Run all four checkpoints on the same prompt and bundle the results.
        __UpperCamelCase :Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(__lowercase)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""")
        # Get first result from Stable Diffusion Checkpoint v1.1
        __UpperCamelCase :Tuple = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        __UpperCamelCase :Any = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        __UpperCamelCase :Dict = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        __UpperCamelCase :Optional[Any] = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
| 43
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase_ ( UpperCAmelCase_ ):
    '''Zero-shot text-classification tool backed by an NLI model.

    Classifies an English ``text`` against a caller-provided list of
    ``labels`` by scoring the hypothesis "This example is <label>" for each
    candidate and returning the most likely label.

    NOTE(review): restored from obfuscated code in which all seven class
    attributes were named ``a__`` (each rebinding shadowed the previous one)
    and all three methods were named ``UpperCamelCase__`` (only the last
    survived); method names follow the PipelineTool contract — the first
    method's ``super().setup()`` call grounds the ``setup`` name. Confirm
    against the base class.
    '''
    default_checkpoint = """facebook/bart-large-mnli"""
    description = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    name = """text_classifier"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["""text""", ["""text"""]]
    outputs = ["""text"""]

    def setup(self) -> None:
        """Instantiate model/tokenizer, then locate the model's entailment label index."""
        super().setup()
        config = self.model.config
        # Default to -1 so a config without an entailment label is detected below.
        self.entailment_id = -1
        # NOTE(review): ``idalabel`` is presumably ``id2label`` — obfuscated name kept.
        for idx, label in config.idalabel.items():
            if label.lower().startswith('''entail'''):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''')

    def encode(self , text , labels):
        """Tokenize ``text`` paired with one "This example is <label>" hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )

    def decode(self , outputs):
        """Return the candidate label whose entailment logit is highest."""
        logits = outputs.logits
        # Column 2 is assumed to hold the entailment logit — TODO confirm vs self.entailment_id.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 43
| 1
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__lowercase = logging.get_logger('''transformers.models.encodec''')
__lowercase = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
__lowercase = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
__lowercase = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
__lowercase = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
__lowercase = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
__lowercase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__lowercase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__lowercase = []
__lowercase = []
def lowerCamelCase ( hf_pointer , key , value , full_name , weight_type ):
    '''Copy ``value`` into the parameter of ``hf_pointer`` addressed by dotted ``key``.

    ``full_name`` is the original checkpoint key (used only in error/log
    messages); ``weight_type`` selects which attribute of the resolved module
    receives the data (``None`` means the resolved object itself is the
    tensor to fill).

    NOTE(review): restored from obfuscated code whose five parameters were
    all named ``SCREAMING_SNAKE_CASE`` (duplicate argument names are a
    SyntaxError) and whose branch assignments were discarded into a throwaway
    local; parameter order inferred from the body and the upstream EnCodec
    conversion script — confirm against callers.
    '''
    # Walk the dotted path down to the target submodule/parameter.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    # Sanity-check shapes before writing anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    # Route the tensor to the matching attribute of the resolved module.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase ( name , ignore_keys ):
    '''Return True if checkpoint key ``name`` matches any pattern in ``ignore_keys``.

    Patterns: ``"prefix.*"`` matches keys starting with ``"prefix."``;
    ``"prefix.*.suffix"`` matches keys containing both parts; any other
    pattern matches as a plain substring.

    NOTE(review): the obfuscated original declared both parameters as
    ``SCREAMING_SNAKE_CASE`` (duplicate argument names — a SyntaxError) while
    the body read ``name``/``ignore_keys``; the signature is restored to the
    names the body uses.
    '''
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            # "prefix.*" -> prefix match on "prefix."
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            # "prefix.*.suffix" -> both parts must occur somewhere in the name
            prefix, suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            # plain substring match
            return True
    return False
def lowerCamelCase ( orig_dict , hf_model , model_name ):
    '''Map every tensor of an original EnCodec state dict onto ``hf_model``.

    ``model_name`` selects the key-mapping table ("encodec_24khz" and
    "encodec_32khz" share one table, "encodec_48khz" has its own). Keys with
    no mapping are collected and reported as unused.

    NOTE(review): restored from obfuscated code — the three parameters were
    all named ``SCREAMING_SNAKE_CASE`` (duplicate argument names are a
    SyntaxError) and the model-name test was
    ``model_name == "encodec_24khz" or "encodec_32khz"``, which is always
    truthy; it is now a proper membership check. The module-level tables
    ``MAPPING_24K`` / ``MAPPING_48K`` / ``IGNORE_KEYS`` are assumed to exist
    (this module's obfuscation rebinds them all to ``__lowercase``) — reconcile.
    '''
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""" )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                # Wildcard pattern: match on prefix+suffix, then key becomes the suffix.
                prefix, suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # Substitute the layer index extracted from the original key.
                    layer_index = name.split(key )[0].split('''.''' )[-2]
                    mapped_key = mapped_key.replace('''*''' , layer_index )
                # Work out which attribute of the target module the tensor fills.
                if "weight_g" in name:
                    weight_type = '''weight_g'''
                elif "weight_v" in name:
                    weight_type = '''weight_v'''
                elif "weight_ih_l0" in name:
                    weight_type = '''weight_ih_l0'''
                elif "weight_hh_l0" in name:
                    weight_type = '''weight_hh_l0'''
                elif "bias_ih_l0" in name:
                    weight_type = '''bias_ih_l0'''
                elif "bias_hh_l0" in name:
                    weight_type = '''bias_hh_l0'''
                elif "weight_ih_l1" in name:
                    weight_type = '''weight_ih_l1'''
                elif "weight_hh_l1" in name:
                    weight_type = '''weight_hh_l1'''
                elif "bias_ih_l1" in name:
                    weight_type = '''bias_ih_l1'''
                elif "bias_hh_l1" in name:
                    weight_type = '''bias_hh_l1'''
                elif "bias" in name:
                    weight_type = '''bias'''
                elif "weight" in name:
                    weight_type = '''weight'''
                elif "running_mean" in name:
                    weight_type = '''running_mean'''
                elif "running_var" in name:
                    weight_type = '''running_var'''
                elif "num_batches_tracked" in name:
                    weight_type = '''num_batches_tracked'''
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def lowerCamelCase ( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''Convert an original EnCodec checkpoint into the Transformers format.

    Builds an ``EncodecConfig`` for the requested variant, loads the original
    weights, maps them onto a fresh ``EncodecModel``, and writes the model
    plus a matching ``EncodecFeatureExtractor`` to
    ``pytorch_dump_folder_path``. If ``repo_id`` is given, both artifacts are
    pushed to the Hub.

    NOTE(review): restored from obfuscated code whose parameters were all
    named ``SCREAMING_SNAKE_CASE`` (duplicate argument names are a
    SyntaxError) and whose config/model/feature-extractor objects were
    discarded into a reused throwaway local; parameter order follows the CLI
    call below. Config attribute names for the 32kHz/48kHz variants follow
    the upstream conversion script — confirm against ``EncodecConfig``.
    '''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = '''time_group_norm'''
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""" )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['''best_state''']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    # CLI: convert an original EnCodec checkpoint into the Transformers
    # format and optionally push the result to the Hugging Face Hub.
    # NOTE(review): the parser is bound to ``__lowercase`` but then referenced
    # as ``parser``, the parsed namespace (also ``__lowercase``) is referenced
    # as ``args``, and ``convert_checkpoint`` is not defined in this module
    # (the converter above is named ``lowerCamelCase``) — obfuscation residue
    # to reconcile.
    __lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '''--model''',
        default='''encodec_24khz''',
        type=str,
        help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    __lowercase = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 43
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast unit tests for the Stable unCLIP image-to-image diffusion pipeline.

    NOTE(review): this block is machine-mangled.  The class/method names are
    placeholders, the mixin base classes ``UpperCAmelCase_`` are undefined in
    this file, and every local assignment targets ``__UpperCamelCase`` while
    later lines read the intended names (``embedder_hidden_size``,
    ``feature_extractor``, ``components`` ...).  It raises NameError/SyntaxError
    as written; the comments below record the apparent intent only.
    """
    a__ : int = StableUnCLIPImgaImgPipeline
    a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a__ : Optional[Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    a__ : int = frozenset([] )
    # Apparently get_dummy_components(): build tiny CLIP/UNet/VAE components so
    # the pipeline can be instantiated quickly on CPU.
    def UpperCamelCase__ ( self) -> Tuple:
        __UpperCamelCase :Tuple = 32
        __UpperCamelCase :Optional[int] = embedder_hidden_size
        # image encoding components
        __UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
        # regular denoising components
        torch.manual_seed(0)
        __UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
        __UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
        torch.manual_seed(0)
        __UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        __UpperCamelCase :Dict = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
        torch.manual_seed(0)
        __UpperCamelCase :List[Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Tuple = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
        torch.manual_seed(0)
        __UpperCamelCase :List[str] = AutoencoderKL()
        __UpperCamelCase :Tuple = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    # Apparently get_dummy_inputs(device, seed, pil_image): deterministic
    # prompt, input image and generator for a single pipeline call.
    def UpperCamelCase__ ( self , __lowercase , __lowercase=0 , __lowercase=True) -> str:
        if str(__lowercase).startswith('''mps'''):
            # MPS generators cannot be device-bound; fall back to the global seed.
            __UpperCamelCase :Union[str, Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase)).to(__lowercase)
        if pil_image:
            # Map [-1, 1] floats into [0, 1], then to a PIL image — TODO confirm range.
            __UpperCamelCase :List[Any] = input_image * 0.5 + 0.5
            __UpperCamelCase :Optional[Any] = input_image.clamp(0 , 1)
            __UpperCamelCase :int = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            __UpperCamelCase :Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    # Smoke test: run the pipeline on CPU and compare a 3x3 output slice
    # against hard-coded expected values.
    @skip_mps
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        __UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Tuple = self.get_dummy_components()
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline(**__lowercase)
        __UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
        inputs.update({'''image_embeds''': None})
        __UpperCamelCase :Any = sd_pipe(**__lowercase).images
        __UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    # Attention-slicing equivalence check (exact match only on cpu/mps).
    def UpperCamelCase__ ( self) -> str:
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)
    # Batched-vs-single inference equivalence check.
    def UpperCamelCase__ ( self) -> List[Any]:
        __UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=__lowercase)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # xFormers attention equivalence check (CUDA + xformers only).
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for released Stable unCLIP img2img checkpoints.

    NOTE(review): machine-mangled block — method names are placeholders and
    assignments target ``__UpperCamelCase`` while later lines read the intended
    names (``input_image``, ``pipe``, ``image`` ...); ``torch.floataa`` looks
    like a mangling of ``torch.float16`` — TODO confirm.  NameError as written.
    """
    # Apparently tearDown(): free VRAM between tests.
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # End-to-end test for the 2-1-l img2img checkpoint: generate from a fixed
    # turtle image/seed and compare against a stored reference array.
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        __UpperCamelCase :int = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCamelCase :Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Dict = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
        __UpperCamelCase :Dict = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase)
    # Same end-to-end test for the 2-1-h img2img checkpoint.
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCamelCase :Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :Any = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Optional[int] = pipe(__lowercase , '''anime turle''' , generator=__lowercase , output_type='''np''')
        __UpperCamelCase :List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowercase , __lowercase)
    # Memory test: with attention slicing + sequential CPU offload enabled,
    # peak allocated VRAM must stay under 7 GB.
    def UpperCamelCase__ ( self) -> List[str]:
        __UpperCamelCase :Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        __UpperCamelCase :Union[str, Any] = pipe.to(__lowercase)
        pipe.set_progress_bar_config(disable=__lowercase)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase :Optional[Any] = pipe(
            __lowercase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        __UpperCamelCase :int = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43
| 1
|
from math import pi
def lowerCamelCase(angle: float, radius: float) -> float:
    """Return the arc length of a circle sector.

    Args:
        angle: central angle of the sector, in degrees.
        radius: radius of the circle.

    Returns:
        The arc length ``2 * pi * radius * (angle / 360)``.
    """
    # BUG FIX: both parameters were named SCREAMING_SNAKE_CASE (a SyntaxError
    # in Python) and the body read the undefined names `radius` and `angle`.
    # The demo call below (90, 10) fixes the intended order: angle first.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    # Quarter circle of radius 10 -> 5 * pi ~= 15.70796...
    # BUG FIX: the demo previously called the undefined name `arc_length`.
    print(lowerCamelCase(90, 10))
| 43
|
import numpy as np
def lowerCamelCase(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Power iteration: dominant eigenpair of a square (Hermitian) matrix.

    Repeatedly applies ``input_matrix`` to ``vector``, normalizing each step,
    until the Rayleigh-quotient eigenvalue estimate stabilizes.

    Args:
        input_matrix: square real-symmetric or complex-Hermitian ndarray.
        vector: initial guess vector, same leading dimension as the matrix.
        error_tol: relative change in the eigenvalue at which to stop.
        max_iterations: hard cap on iterations.

    Returns:
        Tuple ``(lambda_, vector)``: the dominant eigenvalue (real for the
        Hermitian case) and the corresponding unit eigenvector.
    """
    # BUG FIX: all four parameters were named SCREAMING_SNAKE_CASE (a
    # SyntaxError) and every assignment targeted the placeholder
    # `__UpperCamelCase`, leaving `is_complex`, `w`, `lambda_`, ... undefined.
    # Restored using the names the subsequent reads expect.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed
    # max_iterations or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence (relative change of the eigenvalue estimate).
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # Hermitian matrices have real eigenvalues; drop numerical imag part.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def lowerCamelCase ( ):
    """Self-test for the power-iteration routine defined above.

    Builds a real symmetric matrix and a complex Hermitian matrix, runs power
    iteration on each, and compares the dominant eigenpair against
    ``np.linalg.eigh``.

    NOTE(review): machine-mangled block — every assignment targets the
    placeholder ``__UpperCamelCase`` while later lines read the intended names
    (``real_input_matrix``, ``eigen_value`` ...), and it calls
    ``power_iteration`` which is not defined under that name in this file.
    ``np.complexaaa`` looks like a mangling of ``np.complex128`` — TODO
    confirm.  Raises NameError as written.
    """
    __UpperCamelCase :int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    __UpperCamelCase :Optional[Any] = np.array([41, 4, 20] )
    __UpperCamelCase :Any = real_input_matrix.astype(np.complexaaa )
    # Build a Hermitian matrix: add an upper-triangular imaginary part and
    # subtract its transpose.
    __UpperCamelCase :Dict = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    __UpperCamelCase :Optional[int] = np.array([41, 4, 20] ).astype(np.complexaaa )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            __UpperCamelCase :Any = real_input_matrix
            __UpperCamelCase :int = real_vector
        elif problem_type == "complex":
            __UpperCamelCase :Tuple = complex_input_matrix
            __UpperCamelCase :Optional[Any] = complex_vector
        # Our implementation.
        __UpperCamelCase , __UpperCamelCase :Dict = power_iteration(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        __UpperCamelCase , __UpperCamelCase :List[Any] = np.linalg.eigh(SCREAMING_SNAKE_CASE )
        # Last eigenvalue is the maximum one.
        __UpperCamelCase :List[Any] = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        __UpperCamelCase :str = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(SCREAMING_SNAKE_CASE ) - np.abs(SCREAMING_SNAKE_CASE ) ) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `test_power_iteration` is not defined in this file (the
    # test above is named `lowerCamelCase`); NameError as written.
    test_power_iteration()
| 43
| 1
|
# Adjacency list of an undirected 7-node demo graph used by the BFS helpers
# below.
# NOTE(review): the `__main__` block at the bottom of this file refers to this
# constant as `demo_graph`, but the mangled name here is `__lowercase`; the
# two must be reconciled for the demo to run.
__lowercase = {
    '''A''': ['''B''', '''C''', '''E'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F''', '''G'''],
    '''D''': ['''B'''],
    '''E''': ['''A''', '''B''', '''D'''],
    '''F''': ['''C'''],
    '''G''': ['''C'''],
}
def lowerCamelCase(graph, start, goal):
    """Breadth-first search for a shortest path between two nodes.

    Args:
        graph: adjacency mapping, node -> list of neighbour nodes.
        start: node to start from.
        goal: node to reach.

    Returns:
        The list of nodes from ``start`` to ``goal`` inclusive (a shortest
        path by edge count), or ``[]`` if no path exists.
    """
    # BUG FIX: all three parameters were named SCREAMING_SNAKE_CASE (a
    # SyntaxError) and every assignment targeted the placeholder
    # `__UpperCamelCase`, leaving `queue`, `path`, `node`, ... undefined.
    # Restored using the names the subsequent reads expect.
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def lowerCamelCase(graph, start, target):
    """Breadth-first search for the shortest-path distance between two nodes.

    Args:
        graph: adjacency mapping, node -> list of neighbour nodes.
        start: node to start from.
        target: node to reach.

    Returns:
        The number of edges on a shortest path from ``start`` to ``target``;
        ``0`` if they are the same node; ``-1`` if the graph is empty, either
        node is missing, or no path exists.
    """
    # BUG FIX: all three parameters were named SCREAMING_SNAKE_CASE (a
    # SyntaxError) and every assignment targeted the placeholder
    # `__UpperCamelCase`, leaving `queue`, `visited`, `dist`, ... undefined.
    # Restored using the names the subsequent reads expect.
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Demo: print a shortest path and its length from 'G' to 'D'.
    # NOTE(review): `bfs_shortest_path`, `bfs_shortest_path_distance` and
    # `demo_graph` are not defined under these names in this file (the mangled
    # names are `lowerCamelCase` and `__lowercase`), so this demo raises
    # NameError as written.
    print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 43
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger and tokenizer-file constants for the Bloom fast tokenizer.
# NOTE(review): all three assignments target the same mangled name
# `__lowercase`, so each one clobbers the previous; the class below reads
# `VOCAB_FILES_NAMES` / `PRETRAINED_VOCAB_FILES_MAP`, which are therefore
# undefined as written.
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Bloom-style fast (Rust-backed) tokenizer wrapper.

    NOTE(review): machine-mangled block — the base class ``UpperCAmelCase_``
    is undefined here, ``__init__`` repeats the parameter name ``__lowercase``
    (a SyntaxError), and several assignments target ``__UpperCamelCase`` while
    later lines read the intended names (``pre_tok_state``, ``input_ids`` ...).
    The comments below record the apparent intent only.
    """
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : List[str] = ["""input_ids""", """attention_mask"""]
    a__ : int = None
    # Constructor: forward tokenizer files / special tokens to the fast base
    # class, then rebuild the backend pre-tokenizer if its serialized
    # `add_prefix_space` disagrees with the requested one.
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        __UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
            __UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
            __UpperCamelCase :str = add_prefix_space
            __UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
        __UpperCamelCase :Tuple = add_prefix_space
    # Apparently _batch_encode_plus: pre-tokenized input requires
    # add_prefix_space=True.
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        __UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*__lowercase , **__lowercase)
    # Apparently _encode_plus: same guard as above for single inputs.
    def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
        __UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*__lowercase , **__lowercase)
    # Apparently save_vocabulary: delegate to the backend model's save.
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
        __UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
        return tuple(__lowercase)
    # Apparently _build_conversation_input_ids: concatenate encoded turns
    # (each followed by EOS) and truncate to model_max_length from the left.
    def UpperCamelCase__ ( self , __lowercase) -> List[int]:
        __UpperCamelCase :str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
        if len(__lowercase) > self.model_max_length:
            __UpperCamelCase :Any = input_ids[-self.model_max_length :]
        return input_ids
| 43
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Zero-shot text-classification tool backed by facebook/bart-large-mnli.

    NOTE(review): machine-mangled block — the base class ``UpperCAmelCase_``
    is a placeholder for PipelineTool, and assignments target
    ``__UpperCamelCase`` while later lines read the intended names
    (``config``, ``self.entailment_id``, ``self._labels`` ...).  NameError /
    AttributeError as written; comments record the apparent intent only.
    """
    a__ : Optional[Any] = """facebook/bart-large-mnli"""
    a__ : int = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    a__ : Optional[Any] = """text_classifier"""
    a__ : Any = AutoTokenizer
    a__ : str = AutoModelForSequenceClassification
    a__ : str = ["""text""", ["""text"""]]
    a__ : Optional[int] = ["""text"""]
    # Apparently setup(): locate the "entailment" label index in the model
    # config so decode() can read the right logit column.
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        super().setup()
        __UpperCamelCase :int = self.model.config
        __UpperCamelCase :Optional[Any] = -1
        # `idalabel` looks like a mangling of `id2label` — TODO confirm.
        for idx, label in config.idalabel.items():
            if label.lower().startswith('''entail'''):
                __UpperCamelCase :List[Any] = int(__lowercase)
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''')
    # Apparently encode(text, labels): build one NLI premise/hypothesis pair
    # per candidate label.
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Union[str, Any]:
        __UpperCamelCase :Any = labels
        return self.pre_processor(
            [text] * len(__lowercase) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
    # Apparently decode(outputs): return the label whose hypothesis scored the
    # highest logit in column 2 (presumably the entailment column — see setup).
    def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]:
        __UpperCamelCase :List[Any] = outputs.logits
        __UpperCamelCase :Any = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 43
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config URL map for CTRL.
# NOTE(review): both assignments target the same mangled name `__lowercase`,
# so the logger is immediately clobbered by the URL map.
__lowercase = logging.get_logger(__name__)
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """CTRL-style model configuration.

    NOTE(review): machine-mangled block — the base class ``UpperCAmelCase_``
    is a placeholder for PretrainedConfig, and ``__init__`` assigns every
    argument to the placeholder ``__UpperCamelCase`` instead of
    ``self.<attr>``, so none of the config attributes (vocab_size,
    n_positions, ...) are actually stored on the instance.  It also repeats
    the parameter name ``__lowercase`` (a SyntaxError).
    """
    a__ : str = """ctrl"""
    a__ : Dict = ["""past_key_values"""]
    a__ : Tuple = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    # Apparent argument order (from the placeholder-assignment reads below):
    # vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48,
    # n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6,
    # initializer_range=0.02, use_cache=True.
    def __init__( self , __lowercase=246_534 , __lowercase=256 , __lowercase=1_280 , __lowercase=8_192 , __lowercase=48 , __lowercase=16 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1E-6 , __lowercase=0.02 , __lowercase=True , **__lowercase , ) -> List[Any]:
        __UpperCamelCase :List[str] = vocab_size
        __UpperCamelCase :Optional[Any] = n_positions
        __UpperCamelCase :Dict = n_embd
        __UpperCamelCase :Dict = n_layer
        __UpperCamelCase :List[Any] = n_head
        __UpperCamelCase :int = dff
        __UpperCamelCase :Union[str, Any] = resid_pdrop
        __UpperCamelCase :Optional[int] = embd_pdrop
        __UpperCamelCase :List[Any] = layer_norm_epsilon
        __UpperCamelCase :Dict = initializer_range
        __UpperCamelCase :Any = use_cache
        super().__init__(**__lowercase)
| 43
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """CLIP-style image processor: optional RGB conversion, resize to a
    shortest edge, center crop, rescale and normalize, then batch as
    ``pixel_values``.

    NOTE(review): machine-mangled block — the base class ``UpperCAmelCase_``
    is a placeholder for BaseImageProcessor, parameter names are all
    ``__lowercase`` (repeated: SyntaxError), and assignments target
    ``__UpperCamelCase`` instead of ``self.<attr>`` / local names, so reads
    such as ``self.do_resize`` and ``images`` are undefined as written.
    """
    a__ : Any = ["""pixel_values"""]
    # Constructor: store per-step defaults (resize/crop/rescale/normalize/RGB
    # conversion).  Defaults mirror OpenAI CLIP: shortest edge 224, 224x224
    # crop, 1/255 rescale, CLIP mean/std.
    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BICUBIC , __lowercase = True , __lowercase = None , __lowercase = True , __lowercase = 1 / 255 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , **__lowercase , ) -> None:
        super().__init__(**__lowercase)
        __UpperCamelCase :Optional[Any] = size if size is not None else {'''shortest_edge''': 224}
        __UpperCamelCase :Optional[int] = get_size_dict(__lowercase , default_to_square=__lowercase)
        __UpperCamelCase :Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        __UpperCamelCase :Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase , param_name='''crop_size''')
        __UpperCamelCase :Optional[int] = do_resize
        __UpperCamelCase :Any = size
        __UpperCamelCase :List[str] = resample
        __UpperCamelCase :Dict = do_center_crop
        __UpperCamelCase :Tuple = crop_size
        __UpperCamelCase :Optional[Any] = do_rescale
        __UpperCamelCase :str = rescale_factor
        __UpperCamelCase :Optional[int] = do_normalize
        __UpperCamelCase :List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __UpperCamelCase :List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
        __UpperCamelCase :int = do_convert_rgb
    # Apparently resize(image, size, ...): scale so the shortest edge matches
    # size["shortest_edge"], preserving aspect ratio.
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        __UpperCamelCase :Any = get_size_dict(__lowercase , default_to_square=__lowercase)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        __UpperCamelCase :Any = get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase)
        return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase)
    # Apparently center_crop(image, size, ...): crop to size["height"] x
    # size["width"] around the image center.
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        __UpperCamelCase :List[Any] = get_size_dict(__lowercase)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase)
    # Apparently rescale(image, scale, ...): multiply pixel values by `scale`.
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
    # Apparently normalize(image, mean, std, ...): per-channel standardization.
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase)
    # Apparently preprocess(images, ...): resolve per-call overrides against
    # the stored defaults, validate, apply the pipeline steps in order, and
    # return a BatchFeature of pixel_values.
    def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        __UpperCamelCase :str = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase :Any = size if size is not None else self.size
        __UpperCamelCase :int = get_size_dict(__lowercase , param_name='''size''' , default_to_square=__lowercase)
        __UpperCamelCase :List[Any] = resample if resample is not None else self.resample
        __UpperCamelCase :List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
        __UpperCamelCase :Union[str, Any] = crop_size if crop_size is not None else self.crop_size
        __UpperCamelCase :Dict = get_size_dict(__lowercase , param_name='''crop_size''' , default_to_square=__lowercase)
        __UpperCamelCase :str = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCamelCase :int = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCamelCase :Optional[int] = image_mean if image_mean is not None else self.image_mean
        __UpperCamelCase :int = image_std if image_std is not None else self.image_std
        __UpperCamelCase :List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        __UpperCamelCase :List[str] = make_list_of_images(__lowercase)
        if not valid_images(__lowercase):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __UpperCamelCase :Optional[int] = [convert_to_rgb(__lowercase) for image in images]
        # All transformations expect numpy arrays.
        __UpperCamelCase :str = [to_numpy_array(__lowercase) for image in images]
        if do_resize:
            __UpperCamelCase :Optional[Any] = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase) for image in images]
        if do_center_crop:
            __UpperCamelCase :Dict = [self.center_crop(image=__lowercase , size=__lowercase) for image in images]
        if do_rescale:
            __UpperCamelCase :Optional[Any] = [self.rescale(image=__lowercase , scale=__lowercase) for image in images]
        if do_normalize:
            __UpperCamelCase :List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase) for image in images]
        __UpperCamelCase :Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
        __UpperCamelCase :List[str] = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase)
| 43
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast CPU tests for `TextToVideoSDPipeline` built from tiny random components.

    NOTE(review): this file was machine-mangled — most assignment targets were
    rewritten to `__UpperCamelCase`, so several bindings below no longer define
    the names that later lines read (e.g. `unet`, `scheduler`, `vae`), and all
    four class attributes share the name `a__` (only the last survives).
    Documented as-is; confirm behavior against the upstream diffusers test.
    """
    # Pipeline class under test and its parameter sets, read by the shared
    # tester base class `UpperCAmelCase_`.
    a__ : str = TextToVideoSDPipeline
    a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
    a__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    a__ : int = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Build tiny components: 3D UNet, DDIM scheduler, VAE, CLIP encoder/tokenizer.

        NOTE(review): every method in this class shares the name
        `UpperCamelCase__`, so only the last definition exists at runtime.
        """
        torch.manual_seed(0)
        # Presumably bound to `unet` before mangling — the dict below reads it.
        __UpperCamelCase :str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        __UpperCamelCase :Optional[int] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        __UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        __UpperCamelCase :Optional[Any] = CLIPTextModel(__lowercase)
        __UpperCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        # NOTE(review): `unet`/`scheduler`/`vae`/`text_encoder`/`tokenizer` are
        # read here but never bound above (mangled targets).
        __UpperCamelCase :Union[str, Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[int]:
        """Deterministic inputs for a device; seeds a per-device generator.

        NOTE(review): duplicate `__lowercase` parameter names — a SyntaxError
        introduced by the mangling pass.
        """
        if str(__lowercase).startswith('''mps'''):
            # MPS does not support device-bound generators.
            __UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
        else:
            __UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
        __UpperCamelCase :Dict = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs

    def UpperCamelCase__ ( self) -> Optional[Any]:
        """2-step CPU generation; checks frame shape and a pixel slice."""
        __UpperCamelCase :int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase :Optional[int] = self.get_dummy_components()
        __UpperCamelCase :Dict = TextToVideoSDPipeline(**__lowercase)
        __UpperCamelCase :Any = sd_pipe.to(__lowercase)
        sd_pipe.set_progress_bar_config(disable=__lowercase)
        __UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowercase)
        __UpperCamelCase :int = '''np'''
        __UpperCamelCase :List[str] = sd_pipe(**__lowercase).frames
        __UpperCamelCase :Optional[Any] = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        # Reference pixel values for the fixed seed.
        __UpperCamelCase :str = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase__ ( self) -> Tuple:
        """Attention-slicing output must match the unsliced forward pass."""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3E-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase__ ( self) -> Optional[int]:
        """xFormers attention must match the default attention path."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1E-2)

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        pass

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
    def UpperCamelCase__ ( self) -> Dict:
        pass

    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
    def UpperCamelCase__ ( self) -> str:
        pass

    def UpperCamelCase__ ( self) -> List[str]:
        # Delegates to the base class's progress-bar test.
        return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow CUDA integration tests for `TextToVideoSDPipeline`.

    NOTE(review): mangled assignment targets — the loaded pipeline and the
    generated frames are bound to `__UpperCamelCase`, yet later lines read
    `pipe`, `video_frames`, `video` and `expected_video`; both test methods
    also share the name `UpperCamelCase__`, so only the last exists at runtime.
    Documented as-is; confirm against the upstream diffusers test.
    """

    def UpperCamelCase__ ( self) -> Dict:
        """25-step generation with DPMSolverMultistep vs. a reference video
        (mean absolute difference below 5e-2)."""
        __UpperCamelCase :Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
        __UpperCamelCase :List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        __UpperCamelCase :Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        __UpperCamelCase :str = pipe.to('''cuda''')
        __UpperCamelCase :Optional[Any] = '''Spiderman is surfing'''
        # CPU generator keeps the noise deterministic across GPUs.
        __UpperCamelCase :Union[str, Any] = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=25 , output_type='''pt''').frames
        __UpperCamelCase :Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2

    def UpperCamelCase__ ( self) -> int:
        """Quick 2-step generation vs. a 2-step reference video."""
        __UpperCamelCase :str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
        __UpperCamelCase :Union[str, Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
        __UpperCamelCase :str = pipe.to('''cuda''')
        __UpperCamelCase :Union[str, Any] = '''Spiderman is surfing'''
        __UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''pt''').frames
        __UpperCamelCase :Optional[Any] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5E-2
| 43
| 1
|
class lowerCamelCase_ :
    """Doubly-linked-list node holding a value and links to its neighbours.

    Fixes (mangled original): `__init__` declared three parameters all named
    `__lowercase` (a SyntaxError) and bound its values to a throwaway local
    instead of `self.data`/`self.previous`/`self.next`, which every accessor
    reads; the three accessors also all shared the name `UpperCamelCase__`.
    The accessor names used by the rest of the file (`get_data`, `get_next`,
    `get_previous`) are restored, with a backward-compatible alias for the
    surviving mangled name.
    """

    def __init__(self, data, previous=None, next_node=None) -> None:
        # The payload and the two neighbour links read by the accessors below.
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"""{self.data}"""

    def get_data(self):
        """Return the node's payload."""
        return self.data

    def get_next(self):
        """Return the successor node (or None)."""
        return self.next

    def get_previous(self):
        """Return the predecessor node (or None)."""
        return self.previous

    # Backward-compatible alias: in the mangled original all three accessors
    # shared this name, so only the last (get_previous) survived.
    UpperCamelCase__ = get_previous
class lowerCamelCase_ :
    """Forward iterator over a linked list of nodes exposing
    `get_data()`/`get_next()`.

    Fixes (mangled original): `__init__` bound the head to a throwaway local
    instead of `self.current`, and the advance method had lost its `__next__`
    name, so `for`-loops over the iterator raised. A backward-compatible alias
    keeps the mangled method name callable.
    """

    def __init__(self, head) -> None:
        # Cursor; None once the list is exhausted.
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        """Return the current node's data and advance, or raise StopIteration."""
        if not self.current:
            raise StopIteration
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value

    # Backward-compatible alias for the mangled original's method name.
    UpperCamelCase__ = __next__
class Node:
    """Doubly-linked-list node.

    NOTE(review): the file's node class lost its `Node` name in the mangling
    pass, yet the list below constructs `Node(...)`; a local definition is
    provided so the list is self-contained.
    """

    def __init__(self, data, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"""{self.data}"""

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class lowerCamelCase_ :
    """Doubly linked list with head/tail pointers.

    Fixes (mangled original): every attribute/link assignment was bound to a
    throwaway local, many `def` headers declared duplicate `__lowercase`
    parameters (SyntaxErrors), and all methods shared the name
    `UpperCamelCase__`. The intended method names are recovered from the
    class's own internal calls (`self.set_head`, `self.insert_before_node`,
    ...); the surviving mangled name is kept as an alias for `is_empty`.
    """

    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        """Space-separated data values from head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        # Generator form; the original delegated to an iterator helper class
        # whose name was lost in the mangling pass.
        current = self.head
        while current:
            yield current.get_data()
            current = current.get_next()

    def get_head_data(self):
        """Data of the first node, or None if the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Data of the last node, or None if the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make `node` the new head (also tail when the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Append `node` at the tail (head when the list was empty)."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        """Append a new node holding `value`."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        """Splice `node_to_insert` immediately before `node`."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        """Splice `node_to_insert` immediately after `node`."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        """Insert `value` at 1-based `position`; appends when position exceeds length."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        """Return the first node whose data equals `item`; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''')

    def delete_value(self, value) -> None:
        """Remove the first node holding `value` (raises via get_node if absent)."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Unlink `node` from both neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None

    # Backward-compatible alias: in the mangled original all methods shared
    # this name, so only the last (is_empty) survived.
    UpperCamelCase__ = is_empty
def lowerCamelCase ( ):
    """No-op placeholder.

    NOTE(review): the mangling pass appears to have dropped this function's
    body — only the docstring remains. Confirm against the upstream source.
    """


# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 43
|
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Compute the Z-array of a string in O(n).

    z[i] is the length of the longest substring starting at i that is also a
    prefix of the string (z[0] is left at 0 by convention).

    Fixes (mangled original): the interval bounds were assigned to a throwaway
    local, so `left_pointer`/`right_pointer` were undefined, and the extension
    step called a helper whose name was lost; the extension condition is
    inlined so the function is self-contained. The parameter name is kept for
    interface compatibility.

    :param SCREAMING_SNAKE_CASE: input string (or any sequence)
    :return: list of Z-values, same length as the input
    """
    s = SCREAMING_SNAKE_CASE  # readable local alias
    z_result = [0 for _ in range(len(s))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(s)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        # extend the match character by character
        while i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]:
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def lowerCamelCase ( i , z_result , s ):
    """Return True while the Z-box at index `i` can be extended by one char.

    Fixes (mangled original): the `def` header declared three parameters all
    named `SCREAMING_SNAKE_CASE` (a SyntaxError) while the body read
    `i`/`z_result`/`s`; the names the body actually uses are restored.

    :param i: position in `s` whose Z-value is being grown
    :param z_result: partially computed Z-array
    :param s: the string being analysed
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def _z_function(s):
    """O(n) Z-array helper (longest prefix-match length at each index).

    Local copy: the sibling Z-function lost its `z_function` name in the
    mangling pass, so the counter below could not call it.
    """
    z_result = [0] * len(s)
    left, right = 0, 0
    for i in range(1, len(s)):
        if i <= right:
            z_result[i] = min(right - i + 1, z_result[i - left])
        while i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]:
            z_result[i] += 1
        if i + z_result[i] - 1 > right:
            left, right = i, i + z_result[i] - 1
    return z_result


def lowerCamelCase ( pattern , input_str ):
    """Count (possibly overlapping) occurrences of `pattern` in `input_str`.

    Fixes (mangled original): the `def` header declared two parameters with
    the same name (a SyntaxError) while the body read `pattern`/`input_str`,
    and it called an undefined `z_function`; a private helper is used instead.

    :param pattern: substring to search for
    :param input_str: string to search in
    :return: number of occurrences
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = _z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 43
| 1
|
from __future__ import annotations
from PIL import Image
# Define glider example
# Example Game of Life seed patterns.
# Fixes (mangled original): both constants were bound to `__lowercase`, so the
# `GLIDER` name used further down (`generate_images(GLIDER, 16)`) was
# undefined; the intended names are restored while `__lowercase` keeps its
# final (blinker) value for backward compatibility.

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]

__lowercase = BLINKER
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Return the next Game of Life generation for a rectangular 0/1 grid.

    Fixes (mangled original): the locals `next_generation`,
    `next_generation_row`, `neighbour_count` and `alive` were assigned to a
    throwaway name and the row list was never appended; the parameter name is
    kept for interface compatibility.

    :param SCREAMING_SNAKE_CASE: 2-D list of cells, 1 = alive, 0 = dead
    :return: new 2-D list of the same shape
    """
    cells = SCREAMING_SNAKE_CASE  # readable local alias
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def _next_generation(cells):
    """One Game of Life step; local copy because the sibling generation
    function lost its `new_generation` name in the mangling pass."""
    rows = len(cells)
    result = []
    for i in range(rows):
        row = []
        for j in range(len(cells[i])):
            count = sum(
                cells[a][b]
                for a in range(max(0, i - 1), min(rows, i + 2))
                for b in range(max(0, j - 1), min(len(cells[i]), j + 2))
                if (a, b) != (i, j)
            )
            alive = cells[i][j] == 1
            row.append(1 if (alive and 2 <= count <= 3) or (not alive and count == 3) else 0)
        result.append(row)
    return result


def lowerCamelCase ( cells , frames ):
    """Render `frames` successive generations of `cells` as greyscale PIL images.

    Fixes (mangled original): the `def` header declared two parameters with the
    same name (a SyntaxError) and all locals were bound to a throwaway name;
    the generation step is provided as a private helper because the sibling
    `new_generation` function's name was lost in the mangling pass.

    :param cells: 2-D list of cells, 1 = alive (black), 0 = dead (white)
    :param frames: number of generations to render
    :return: list of PIL ``Image`` objects, one per generation
    """
    images = []
    for _ in range(frames):
        # Create output image (width = columns, height = rows)
        img = Image.new('''RGB''' , (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                # Live cells render black (0), dead cells white (255).
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = _next_generation(cells)
    return images
if __name__ == "__main__":
    # Render 16 generations of the glider and save them as an animated GIF.
    # NOTE(review): `generate_images`, `GLIDER` and `images` are undefined in
    # this (mangled) file — the names were rewritten; confirm against upstream.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 43
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Fixes (mangled original): the pipeline class below reads `logger` and
# `TARGET_FEATURE_LENGTH`, but both constants had been bound to `__lowercase`;
# the intended names are restored, and `__lowercase` keeps its final value
# (the feature length) for backward compatibility.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
__lowercase = TARGET_FEATURE_LENGTH
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """MIDI-to-audio spectrogram-diffusion pipeline.

    Registered sub-modules (see `__init__`): a notes encoder, a continuous
    (spectrogram-context) encoder, a T5-film decoder, a scheduler and a MelGAN
    vocoder.

    NOTE(review): this file was machine-mangled — assignment targets were
    rewritten to `__UpperCamelCase`, and several `def` headers declare
    duplicate `__lowercase` parameters (a SyntaxError). Documented as-is;
    confirm against upstream diffusers before relying on behavior.
    """
    # Components the pipeline can run without ("numpy" output then impossible).
    a__ : Tuple = ["""melgan"""]

    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> None:
        # NOTE(review): duplicate parameter names above — mangled signature.
        super().__init__()
        # From MELGAN
        # Presumably `self.min_value`, `self.max_value` and `self.n_dims`
        # before mangling — the scaling helpers and `__call__` read those.
        __UpperCamelCase :int = math.log(1E-5) # Matches MelGAN training.
        __UpperCamelCase :int = 4.0 # Largest value for most examples
        __UpperCamelCase :str = 128
        self.register_modules(
            notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )

    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Dict:
        """Scale raw features into `output_range` (optionally clipping first).

        NOTE(review): duplicate parameter names — mangled signature; the body
        reads `output_range`, `clip`, `features`, `min_out`, `max_out`,
        `zero_one`, none of which are bound here.
        """
        __UpperCamelCase , __UpperCamelCase :str = output_range
        if clip:
            __UpperCamelCase :Union[str, Any] = torch.clip(__lowercase , self.min_value , self.max_value)
        # Scale to [0, 1].
        __UpperCamelCase :Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Optional[int]:
        """Inverse of the scaling above: map outputs back to model feature range."""
        __UpperCamelCase , __UpperCamelCase :int = input_range
        __UpperCamelCase :Optional[int] = torch.clip(__lowercase , __lowercase , __lowercase) if clip else outputs
        # Scale to [0, 1].
        __UpperCamelCase :List[str] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
        """Encode note tokens and continuous context with their masks."""
        # Mask out padding token positions (token id 0).
        __UpperCamelCase :List[str] = input_tokens > 0
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.notes_encoder(
            encoder_input_tokens=__lowercase , encoder_inputs_mask=__lowercase)
        __UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.continuous_encoder(
            encoder_inputs=__lowercase , encoder_inputs_mask=__lowercase)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
        """Run one decoder step for the given encodings and noise time."""
        __UpperCamelCase :Optional[int] = noise_time
        if not torch.is_tensor(__lowercase):
            __UpperCamelCase :str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(__lowercase) and len(timesteps.shape) == 0:
            __UpperCamelCase :Dict = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        __UpperCamelCase :List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        __UpperCamelCase :Tuple = self.decoder(
            encodings_and_masks=__lowercase , decoder_input_tokens=__lowercase , decoder_noise_time=__lowercase)
        return logits

    @torch.no_grad()
    def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio chunk-by-chunk, feeding each predicted mel segment
        back in as context for the next one; optionally vocode with MelGAN.

        NOTE(review): duplicate parameter names — mangled signature.
        """
        # Validate callback_steps (must be a positive int when given).
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__lowercase)}.""")
        # Running mel buffers: one-chunk context and the accumulated song.
        __UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
        __UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
        __UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
        for i, encoder_input_tokens in enumerate(__lowercase):
            if i == 0:
                __UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                __UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                __UpperCamelCase :Tuple = ones
            __UpperCamelCase :Optional[Any] = self.scale_features(
                __lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
            __UpperCamelCase :int = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            __UpperCamelCase :int = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(__lowercase)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                __UpperCamelCase :Optional[int] = self.decode(
                    encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                __UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
            __UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
            __UpperCamelCase :List[Any] = mel[:1]
            __UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
            # Append this chunk's prediction to the full song.
            __UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowercase , __lowercase)
            logger.info('''Generated segment''' , __lowercase)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            # Vocode the mel spectrogram to a waveform.
            __UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            __UpperCamelCase :List[str] = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=__lowercase)
| 43
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the XGLM model family (transformers pattern):
# build the import-structure dict eagerly, import for real only under
# TYPE_CHECKING, and otherwise install a lazy module.
# Fixes (mangled original): every `_import_structure[...]` assignment had been
# collapsed into the throwaway name `__lowercase`, leaving `_import_structure`
# undefined at the `_LazyModule(...)` call at the bottom.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Kept as an assignment to `__lowercase` to match the surrounding
    # (mangled) file's convention for the lazy-module binding.
    __lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 43
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()

# Fixes (mangled original): the conversion functions below read `logger` and
# `MAPPING`, but both values had been bound to `__lowercase`; the intended
# names are restored, with `__lowercase` keeping its final (dict) value for
# backward compatibility.
logger = logging.get_logger(__name__)

# fairseq parameter-name (sub)key -> HF Hubert parameter path; `*` is filled
# in with the encoder layer index during weight loading.
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
__lowercase = MAPPING
def lowerCamelCase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy one fairseq tensor into the HF module addressed by dotted `key`.

    Fixes (mangled original): the `def` header declared five parameters all
    named `SCREAMING_SNAKE_CASE` (a SyntaxError) and the locals were bound to
    a throwaway name; the names the body reads are restored (upstream order:
    hf_pointer, key, value, full_name, weight_type).

    :param hf_pointer: root HF module to descend into
    :param key: dotted attribute path relative to `hf_pointer`
    :param value: tensor to copy in
    :param full_name: original fairseq parameter name (for logging)
    :param weight_type: one of "weight"/"weight_g"/"weight_v"/"bias"/None
    """
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer , attribute)
    # Shape-check against the destination before copying.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def lowerCamelCase ( fairseq_model , hf_model , is_finetuned ):
    """Copy every fairseq Hubert weight into the HF model, logging leftovers.

    Fixes (mangled original): the `def` header declared three parameters with
    the same name (a SyntaxError) and locals such as `unused_weights`,
    `is_used` and `mapped_key` were bound to a throwaway name; the names the
    body reads are restored.

    :param fairseq_model: source fairseq model (provides `state_dict()`)
    :param hf_model: destination HF Hubert model
    :param is_finetuned: True for CTC (fine-tuned) checkpoints — prefixes
        mapped keys with "hubert." except for the LM head
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # Conv feature-extractor weights have their own positional loader.
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''')[-1] == name.split('''.''')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def lowerCamelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one fairseq `conv_layers.*` tensor into the HF feature extractor.

    Fixes (mangled original): the `def` header declared five parameters all
    named `SCREAMING_SNAKE_CASE` (a SyntaxError) and the locals (`name`,
    `items`, `layer_id`, `type_id`) were bound to a throwaway name; restored
    to the names the body reads (upstream order: full_name, value,
    feature_extractor, unused_weights, use_group_norm).

    type_id 0 = conv weight/bias, type_id 2 = layer norm (only valid for
    non-group-norm configs, or layer 0 when group norm is used); anything
    else is recorded in `unused_weights`.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def lowerCamelCase ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq Hubert checkpoint to the HF format and save it.

    Fixes (mangled original): the `def` header declared five parameters all
    named `SCREAMING_SNAKE_CASE` (a SyntaxError) and every local was bound to
    a throwaway name; names restored following the upstream conversion script.
    NOTE(review): reconstructed from the surviving call arguments — confirm
    against the upstream transformers conversion script before use.

    :param checkpoint_path: path to the fairseq checkpoint
    :param pytorch_dump_folder_path: output directory for the HF model
    :param config_path: optional HF config.json to start from
    :param dict_path: optional fairseq dictionary (fine-tuned models)
    :param is_finetuned: True -> HubertForCTC, False -> bare HubertModel
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True)
            with open(vocab_path , '''w''' , encoding='''utf-8''') as vocab_handle:
                json.dump(target_dict.indices , vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the Hubert conversion.
    # Fixes (mangled original): the parser and parsed args were bound to
    # `__lowercase`, so `parser.add_argument(...)` and `args.*` read
    # undefined names; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    __lowercase = args
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 43
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.