| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 to 1) |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
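This is the standard transformers lazy-import pattern: `_import_structure` names what each submodule exports, and `_LazyModule` defers the actual imports until an attribute is first accessed. A small usage sketch (assuming a regular `transformers` install; only `configuration_git` gets imported here, and only at access time):

# The top-level import stays cheap; the GIT submodule is imported on first use.
from transformers import GitConfig

config = GitConfig()
print(config.model_type)  # "git"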
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
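The last test works because `ZipExtractor` checks the file's magic number, while `zipfile.is_zipfile` scans for an end-of-central-directory record (`PK\x05\x06`) anywhere in the file and can misfire on a PNG that happens to embed those bytes. A minimal sketch of magic-number sniffing (`looks_like_zip` is a hypothetical helper, not the actual `datasets` implementation):

def looks_like_zip(path) -> bool:
    # Real ZIP files start with PK\x03\x04 (or PK\x05\x06 for an empty
    # archive, PK\x07\x08 for a spanned one) at offset 0.
    with open(path, "rb") as f:
        magic = f.read(4)
    return magic in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")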
def prefix_function(input_string: str) -> list:
    """
    For each index i, computes the length of the longest proper prefix of
    input_string[0...i] that is also a suffix of it (the KMP prefix function).
    The value for the first element is always 0.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    Returns the length of the longest prefix of the string that is also a suffix.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
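Beyond `longest_prefix`, the prefix table is what powers Knuth-Morris-Pratt substring search; a short sketch of that use case (not part of the original module):

def contains(haystack: str, needle: str) -> bool:
    # Any prefix-function value equal to len(needle) inside the combined
    # string marks a complete occurrence of needle in haystack.
    if not needle:
        return True
    combined = needle + "\x00" + haystack
    return max(prefix_function(combined)) == len(needle)

print(contains("abracadabra", "cad"))  # True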
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the values at index1 and index2 and swap them according to direction
    (1 for ascending, 0 for descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of the given length into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] in the given direction by first building a
    bitonic sequence (ascending half followed by descending half) and merging it."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
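One caveat worth keeping in mind: bitonic sort only sorts correctly when the number of elements is a power of two, since `bitonic_sort` always splits the range exactly in half. A quick sanity check on a valid input:

data = [12, 42, -21, 1, 99, 2, 7, 0]  # length 8 == 2**3
bitonic_sort(data, 0, len(data), 1)
assert data == sorted(data)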
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be in order of degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
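A short usage example, with results that follow directly from the definitions above (coefficients are ordered from the constant term upward):

p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
q = Polynomial(1, [1, 1])     # x + 1
print(p + q)           # 3x^2 + 3x + 2
print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17
print(p.derivative())  # 6x + 2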
import argparse

from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
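With these index tables in place, converting between the dense 14-atom layout and the full 37-atom layout is a pure gather. A hedged sketch of the forward direction (`atom37_coords` is an illustrative name for a `(num_res, 37, 3)` coordinate tensor, not something this module defines):

# Pick atom37 positions into the dense atom14 layout, residue by residue.
atom14_coords = torch.gather(
    atom37_coords, 1, protein["residx_atom14_to_atom37"][..., None].expand(-1, -1, 3)
)
# Zero out slots that don't exist for the residue type.
atom14_coords = atom14_coords * protein["atom14_atom_exists"][..., None]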
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Returns the probability of exactly `successes` successes in `trials`
    independent trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
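A quick numeric check of the formula: P(X = 2) over 4 trials with p = 0.5 is C(4, 2) * 0.5^2 * 0.5^2 = 6/16 = 0.375:

assert abs(binomial_distribution(2, 4, 0.5) - 0.375) < 1e-9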
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
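The `_pad_tensors_to_max_len` helper at the end simply allocates a pad-filled `(batch, max_length)` tensor and copies the shorter sequence into its left slice; a self-contained sketch of the same idea outside the trainer:

import torch

def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Pad-fill first, then overwrite the left-most columns with the real ids.
    padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
    padded[:, : tensor.shape[-1]] = tensor
    return padded

print(pad_to_max_len(torch.tensor([[5, 6, 7]]), 5, 0))  # tensor([[5, 6, 7, 0, 0]])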
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
def heaps(arr: list) -> list:
    """
    Returns all permutations of the input list using Heap's algorithm.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
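Heap's algorithm should emit exactly n! permutations of n distinct items; a quick check:

from math import factorial

perms = heaps([1, 2, 3])
assert len(perms) == factorial(3) == 6
assert set(perms) == {(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)}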
import os
def a ( snake_case__: str = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(snake_case__ ) , snake_case__ ) ) as input_file:
lowercase_ = [
[int(snake_case__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase_ = len(snake_case__ )
lowercase_ = len(matrix[0] )
lowercase_ = [[-1 for _ in range(snake_case__ )] for _ in range(snake_case__ )]
for i in range(snake_case__ ):
lowercase_ = matrix[i][0]
for j in range(1 , snake_case__ ):
for i in range(snake_case__ ):
lowercase_ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , snake_case__ ):
lowercase_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"{solution() = }")
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
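# The branching above is the standard lazy-import pattern: under TYPE_CHECKING
# the real symbols are imported so static analyzers can resolve them, while at
# runtime the module is replaced by a _LazyModule that defers the heavy
# framework imports until an attribute is first accessed.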
| 30
| 1
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def a ( snake_case__: Optional[int] , snake_case__: str ):
'''simple docstring'''
lowercase_ = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def a ( snake_case__: int , snake_case__: Dict ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase_ = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
lowercase_ = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase_ = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase_ = in_proj_weight[
-encoder_config.hidden_size :, :
]
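# Shape sketch for the split above (hypothetical hidden_size=768): the fused
# qkv weight has shape (3 * 768, 768); rows [:768] become the query
# projection, [768:1536] the key, and [-768:] the value.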
def a ( snake_case__: Tuple , snake_case__: Optional[Any] , snake_case__: List[Any] ):
'''simple docstring'''
lowercase_ = dct.pop(snake_case__ )
lowercase_ = val
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase_ = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase_ = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowercase_ = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def a ( snake_case__: List[Any] , snake_case__: List[Any] ):
'''simple docstring'''
lowercase_ = ViTConfig(image_size=384 , qkv_bias=snake_case__ )
lowercase_ = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase_ = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase_ = 1_024
lowercase_ = 4_096
lowercase_ = 24
lowercase_ = 16
lowercase_ = 1_024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase_ = False
lowercase_ = '''relu'''
lowercase_ = 1_024
lowercase_ = True
lowercase_ = False
lowercase_ = False
# load HuggingFace model
lowercase_ = ViTModel(snake_case__ , add_pooling_layer=snake_case__ )
lowercase_ = TrOCRForCausalLM(snake_case__ )
lowercase_ = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase_ = torch.hub.load_state_dict_from_url(snake_case__ , map_location='''cpu''' , check_hash=snake_case__ )['''model''']
lowercase_ = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase_ = state_dict.pop(snake_case__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
lowercase_ = val
else:
lowercase_ = val
# load state dict
model.load_state_dict(snake_case__ )
# Check outputs on an image
lowercase_ = ViTImageProcessor(size=encoder_config.image_size )
lowercase_ = RobertaTokenizer.from_pretrained('''roberta-large''' )
lowercase_ = TrOCRProcessor(snake_case__ , snake_case__ )
lowercase_ = processor(images=prepare_img(snake_case__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
lowercase_ = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase_ = model(pixel_values=snake_case__ , decoder_input_ids=snake_case__ )
lowercase_ = outputs.logits
lowercase_ = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase_ = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase_ = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
lowercase_ = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
lowercase_ = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , snake_case__ , atol=1e-3 ), "First elements of logits not as expected"
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
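# Example invocation (script name and paths illustrative):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten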
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['DeiTFeatureExtractor']
__a = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def a ( snake_case__: int , snake_case__: int , snake_case__: float = 1 / sqrt(2 ) ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = (1 - _cos) / 2
lowercase_ = 1 - _cos
lowercase_ = 1 + alpha
lowercase_ = -2 * _cos
lowercase_ = 1 - alpha
lowercase_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
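# In Audio EQ Cookbook notation the low-pass above is:
#   b0 = (1 - cos w0) / 2,  b1 = 1 - cos w0,  b2 = b0
#   a0 = 1 + alpha,         a1 = -2 cos w0,   a2 = 1 - alpha
# with w0 = tau * frequency / samplerate and alpha = sin(w0) / (2 * q_factor).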
def a ( snake_case__: int , snake_case__: int , snake_case__: float = 1 / sqrt(2 ) ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = (1 + _cos) / 2
lowercase_ = -1 - _cos
lowercase_ = 1 + alpha
lowercase_ = -2 * _cos
lowercase_ = 1 - alpha
lowercase_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( snake_case__: int , snake_case__: int , snake_case__: float = 1 / sqrt(2 ) ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = _sin / 2
lowercase_ = 0
lowercase_ = -ba
lowercase_ = 1 + alpha
lowercase_ = -2 * _cos
lowercase_ = 1 - alpha
lowercase_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( snake_case__: int , snake_case__: int , snake_case__: float = 1 / sqrt(2 ) ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = 1 - alpha
lowercase_ = -2 * _cos
lowercase_ = 1 + alpha
lowercase_ = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def a ( snake_case__: int , snake_case__: int , snake_case__: float , snake_case__: float = 1 / sqrt(2 ) , ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = 10 ** (gain_db / 40)
lowercase_ = 1 + alpha * big_a
lowercase_ = -2 * _cos
lowercase_ = 1 - alpha * big_a
lowercase_ = 1 + alpha / big_a
lowercase_ = -2 * _cos
lowercase_ = 1 - alpha / big_a
lowercase_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( snake_case__: int , snake_case__: int , snake_case__: float , snake_case__: float = 1 / sqrt(2 ) , ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = 10 ** (gain_db / 40)
lowercase_ = (big_a + 1) - (big_a - 1) * _cos
lowercase_ = (big_a + 1) + (big_a - 1) * _cos
lowercase_ = (big_a - 1) - (big_a + 1) * _cos
lowercase_ = (big_a - 1) + (big_a + 1) * _cos
lowercase_ = 2 * sqrt(snake_case__ ) * alpha
lowercase_ = big_a * (pmc + aaa)
lowercase_ = 2 * big_a * mpc
lowercase_ = big_a * (pmc - aaa)
lowercase_ = ppmc + aaa
lowercase_ = -2 * pmpc
lowercase_ = ppmc - aaa
lowercase_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( snake_case__: int , snake_case__: int , snake_case__: float , snake_case__: float = 1 / sqrt(2 ) , ):
'''simple docstring'''
lowercase_ = tau * frequency / samplerate
lowercase_ = sin(snake_case__ )
lowercase_ = cos(snake_case__ )
lowercase_ = _sin / (2 * q_factor)
lowercase_ = 10 ** (gain_db / 40)
lowercase_ = (big_a + 1) - (big_a - 1) * _cos
lowercase_ = (big_a + 1) + (big_a - 1) * _cos
lowercase_ = (big_a - 1) - (big_a + 1) * _cos
lowercase_ = (big_a - 1) + (big_a + 1) * _cos
lowercase_ = 2 * sqrt(snake_case__ ) * alpha
lowercase_ = big_a * (ppmc + aaa)
lowercase_ = -2 * big_a * pmpc
lowercase_ = big_a * (ppmc - aaa)
lowercase_ = pmc + aaa
lowercase_ = 2 * mpc
lowercase_ = pmc - aaa
lowercase_ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
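# Minimal usage sketch (assumes IIRFilter exposes a per-sample process()
# method, as in the companion audio_filters.iir_filter module; values are
# illustrative):
#   filt = make_lowpass(1_000, 48_000)
#   out = [filt.process(s) for s in samples]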
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :List[Any] = 'ssube/stable-diffusion-x4-upscaler-onnx'
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 ) -> int:
lowercase_ = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self : Tuple ) -> Optional[Any]:
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs()
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowercase_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowercase ( self : int ) -> Optional[Any]:
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs()
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs()
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : List[str] ) -> Union[str, Any]:
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs()
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs()
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : str ) -> List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
lowercase_ = ort.SessionOptions()
lowercase_ = False
return options
def _lowercase ( self : Optional[Any] ) -> Tuple:
lowercase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase_ = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''A fantasy landscape, trending on artstation'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
lowercase_ = output.images
lowercase_ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowercase ( self : Dict ) -> Optional[Any]:
lowercase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase_ = init_image.resize((1_2_8, 1_2_8) )
lowercase_ = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
lowercase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''A fantasy landscape, trending on artstation'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
lowercase_ = output.images
lowercase_ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 30
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = 'RegNetConfig'
# Base docstring
__a = 'facebook/regnet-y-040'
__a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__a = 'facebook/regnet-y-040'
__a = 'tabby, tabby cat'
__a = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
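# Note on the conv layer above: explicit ZeroPadding2D(kernel_size // 2)
# followed by a VALID convolution reproduces "same" padding for odd kernel
# sizes, which is what the scratchpad notebook referenced in __init__ checks.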
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
lowercase_ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
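# The layer above is squeeze-and-excitation: global average pooling "squeezes"
# the spatial dims to 1x1, the relu/sigmoid 1x1-conv pair produces per-channel
# gates in [0, 1], and the final multiply rescales the input feature map.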
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
a :str = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = RegNetConfig
a :Any = 'regnet'
a :List[str] = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a :Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a :str = False
a :Any = False
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]=False ) -> Optional[int]:
lowercase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowercase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=1_3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=9_9 , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Tuple=3_2 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_7 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ) -> Any:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
lowercase_ = embedding_size
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
lowercase_ = TFMobileBertModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
lowercase_ = [input_ids, input_mask]
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
lowercase_ = TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
lowercase_ = TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
lowercase_ = TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
lowercase_ = self.num_choices
lowercase_ = TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
lowercase_ = self.num_labels
lowercase_ = TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
lowercase_ = TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def _lowercase ( self : int ) -> List[str]:
lowercase_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
    def test_mobilebert_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self ):
        model = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 30
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
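# Utilities for bitsandbytes quantization: `set_module_quantized_tensor_to_device` places a
# single tensor on a device (re-quantizing 8-bit / 4-bit params as needed), and
# `replace_with_bnb_linear` swaps eligible `nn.Linear` layers for their quantized variants.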
def set_module_quantized_tensor_to_device(module , tensor_name , device , value=None , fp16_statistics=None ):
    '''simple docstring'''
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F'''{module} has no attribute {split}.''' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                new_value = torch.tensor(value , device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , '''SCB''' , fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    '''simple docstring'''
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    '''simple docstring'''
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
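# Minimal usage sketch (hypothetical names; assumes a loaded PyTorch model and a
# `BitsAndBytesConfig` instance called `bnb_config` from transformers):
#   model = replace_with_bnb_linear(model , quantization_config=bnb_config )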
def replace_8bit_linear(*args , **kwargs ):
    '''simple docstring'''
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device(*args , **kwargs ):
    '''simple docstring'''
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert(model ):
    '''simple docstring'''
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
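# Illustrative output (model-dependent, not from the original file): for a causal LM whose
# LM head is tied to the input embeddings, this typically yields something like ["lm_head"].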
| 30
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n    title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n    and Jeffrey Dean},\n    year={2016},\n    eprint={1609.08144},\n    archivePrefix={arXiv},\n    primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
    def _compute( self , predictions: List[List[List[str]]] , references: List[List[str]] , min_len: int = 1 , max_len: int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 30
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__a = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname , overwrite: bool = False ):
    '''simple docstring'''
    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False ):
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            ''' this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
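# Example of the kind of block `sort_auto_mapping` reorders inside an OrderedDict
# (hypothetical entries):
#     ("albert", "AlbertConfig"),
#     ("bart", "BartConfig"),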
| 30
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase__( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ) -> None:
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
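# Illustrative shapes (an assumption, not part of the original file): tokens of shape
# (batch, seq_len) and a same-shaped mask yield (batch, seq_len, d_model) activations
# plus the unchanged mask.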
| 30
|
def knapsack(weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    '''
    Recursive 0/1 knapsack: maximum value achievable with capacity max_weight
    using items from position index onwards.

    >>> knapsack([1, 2, 3], [10, 20, 30], 3, 5, 0)
    50
    '''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
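# Complexity note: the recursion tries take/skip for every item, i.e. O(2**n) time and
# O(n) recursion depth.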
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class lowercase__( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_convert_rgb''' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def _lowercase ( self : List[str] ) -> List[Any]:
pass
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
@require_torch
@require_vision
class lowercase__( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_convert_rgb''' ) )
def _lowercase ( self : Dict ) -> List[Any]:
pass
    def test_call_pil_four_channels(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 30
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc(model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
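# Example (hypothetical): two entries sharing local == "model" with the same title collapse
# into one {"local": "model", "title": "Model"} entry, and the result is sorted by title.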
def check_model_doc(overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 30
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
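# Utilities for bitsandbytes quantization: `set_module_quantized_tensor_to_device` places a
# single tensor on a device (re-quantizing 8-bit / 4-bit params as needed), and
# `replace_with_bnb_linear` swaps eligible `nn.Linear` layers for their quantized variants.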
def set_module_quantized_tensor_to_device(module , tensor_name , device , value=None , fp16_statistics=None ):
    '''simple docstring'''
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F'''{module} has no attribute {split}.''' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                new_value = torch.tensor(value , device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , '''SCB''' , fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    '''simple docstring'''
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    '''simple docstring'''
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
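# Minimal usage sketch (hypothetical names; assumes a loaded PyTorch model and a
# `BitsAndBytesConfig` instance called `bnb_config` from transformers):
#   model = replace_with_bnb_linear(model , quantization_config=bnb_config )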
def replace_8bit_linear(*args , **kwargs ):
    '''simple docstring'''
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device(*args , **kwargs ):
    '''simple docstring'''
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert(model ):
    '''simple docstring'''
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
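# Illustrative output (model-dependent, not from the original file): for a causal LM whose
# LM head is tied to the input embeddings, this typically yields something like ["lm_head"].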
| 30
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class lowercase__( PretrainedConfig ):
    """simple docstring"""
    model_type = 'upernet'
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 30
| 1
|
from math import pow, sqrt
def validate(*values: float ) -> bool:
    '''simple docstring'''
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio(molar_mass_1: float , molar_mass_2: float ):
    '''simple docstring'''
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
    )
def first_effusion_rate(effusion_rate: float , molar_mass_1: float , molar_mass_2: float ):
    '''simple docstring'''
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def second_effusion_rate(effusion_rate: float , molar_mass_1: float , molar_mass_2: float ):
    '''simple docstring'''
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def first_molar_mass(molar_mass: float , effusion_rate_1: float , effusion_rate_2: float ):
    '''simple docstring'''
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def second_molar_mass(molar_mass: float , effusion_rate_1: float , effusion_rate_2: float ):
    '''simple docstring'''
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
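# Graham's law, as used above: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1) for two
# gases effusing under identical conditions (the variable naming here is a reconstruction).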
| 30
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def teacher_config(self ):
        return AutoConfig.from_pretrained(TINY_BART )
    def test_valid_t5(self ):
        student , *other = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def test_asymmetric_t5(self ):
        student , *other = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
    def test_same_decoder_small_encoder(self ):
        student , *other = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def test_small_enc_small_dec(self ):
        student , *other = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def test_raises_assert(self ):
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 30
| 1
|
def knapsack(weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    '''
    Recursive 0/1 knapsack: maximum value achievable with capacity max_weight
    using items from position index onwards.

    >>> knapsack([1, 2, 3], [10, 20, 30], 3, 5, 0)
    50
    '''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
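# Complexity note: the recursion tries take/skip for every item, i.e. O(2**n) time and
# O(n) recursion depth.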
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
def solution(n: int = 100 ) -> int:
    '''
    Returns the difference between the sum of the cubes and the sum of the
    squares of the first n natural numbers.
    '''
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
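# Closed forms used above: (1 + 2 + ... + n)**2 equals the sum of the first n cubes
# (Nicomachus's identity), and n(n+1)(2n+1)/6 is the sum of the first n squares.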
if __name__ == "__main__":
print(f"{solution() = }")
| 30
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowercase__( PretrainedConfig ):
    """simple docstring"""
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=50400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class lowercase__( OnnxConfigWithPast ):
    """simple docstring"""
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 13
| 30
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class lowercase__( PretrainedConfig ):
    """simple docstring"""
    model_type = 'masked_bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 30
| 1
|
def longest_distance(graph):
    """Print the number of vertices on the longest path through the DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
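# For the adjacency list above the longest path visits 5 vertices
# (e.g. 0 -> 2 -> 5 -> 6 -> 7), so the call prints 5.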
| 30
|
import os


def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 30
| 1
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered combinations of elements of `array` summing to `target` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoised variant: O(n * target) time."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulation variant."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
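# Sanity check (illustrative): all three implementations agree; for n=3, array=[1, 2, 5],
# target=5 each returns 9 ordered combinations (e.g. 1+1+1+1+1, 1+2+2, 5, ...).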
| 30
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 30
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 30
|
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for `direction` (1 ascending, 0 descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
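# Note (illustrative, not part of the original file): bitonic sort assumes the input
# length is a power of two; e.g. bitonic_sort([3, 1, 4, 2], 0, 4, 1) yields [1, 2, 3, 4].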
| 30
| 1
|
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints decoded text to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, print until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text in a queue, to be consumed by an iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
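# Illustrative usage sketch (assumes a GPT-2 checkpoint; not part of the original file):
#
# from threading import Thread
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
# streamer = TextIteratorStreamer(tok, skip_prompt=True)
# thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
# thread.start()
# generated = "".join(chunk for chunk in streamer)  # consume text chunks as they arrive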
| 30
|
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients should be in order of degree, from smallest to largest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
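# Illustrative usage sketch (not part of the original file):
#
# p = Polynomial(2, [1, 2, 3])             # 3x^2 + 2x + 1 (coefficients from lowest degree)
# print(p)                                 # -> 3x^2 + 2x + 1
# print(p.evaluate(2))                     # -> 17
# print(p.derivative())                    # -> 6x + 2
# print(p.integral(0).derivative() == p)   # -> True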
| 30
| 1
|
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] via bisection (Bolzano's theorem)."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
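# Illustrative check (not part of the original file): the cubic f(x) = x^3 - 2x - 5
# has its real root near 2.0945515, which is what bisection(f, 1, 1000) converges to.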
| 30
|
import itertools
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the n-th prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 30
| 1
|
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection pipeline: localizes objects described by free-text candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
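# Illustrative usage sketch (assumes an OWL-ViT checkpoint; not part of the original file):
#
# from transformers import pipeline
#
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# predictions = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]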
| 30
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 30
| 1
|
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map from TensorFlow variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
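# Worked example (illustrative, not part of the original file): for a 7x7 input with
# stride 2 and kernel 3, in_height % stride_height == 1, so
# pad_along_height = max(3 - 1, 0) = 2, split as pad_top = 1 and pad_bottom = 1 --
# matching TensorFlow's "SAME" padding.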
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetV1ConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # a depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels,
                    kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states
        )
| 30
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 30
| 1
|
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
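# Illustrative usage sketch (shapes are assumptions; not part of the original file):
#
# import jax
# import jax.numpy as jnp
#
# block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
# hidden = jnp.zeros((1, 8, 8, 32))        # NHWC layout
# temb = jnp.zeros((1, 128))               # time embedding
# params = block.init(jax.random.PRNGKey(0), hidden, temb)
# out = block.apply(params, hidden, temb)  # shape (1, 8, 8, 64)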
| 30
|
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 30
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__a = 'src/diffusers'
# Matches is_xxx_available()
__a = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
__a = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
__a = '\n{0} = None\n'
__a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
__a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = _re_backend.findall(snake_case__ )
if len(snake_case__ ) == 0:
return None
return "_and_".join(snake_case__ )
def a ( ):
'''simple docstring'''
with open(os.path.join(snake_case__ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ = f.readlines()
# Get to the point we do the actual imports for type checking
lowercase_ = 0
lowercase_ = {}
# Go through the end of the file
while line_index < len(snake_case__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowercase_ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowercase_ = []
# Until we unindent, add backend objects to the list
while line_index < len(snake_case__ ) and len(lines[line_index] ) > 1:
lowercase_ = lines[line_index]
lowercase_ = _re_single_line_import.search(snake_case__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(snake_case__ ) > 0:
lowercase_ = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object( name , backend_name ):
    '''simple docstring'''
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
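# Illustrative: an UPPERCASE name such as "MODEL_LIST" renders DUMMY_CONSTANT,
# a lowercase name such as "load_weights" renders DUMMY_FUNCTION, and a
# mixed-case name such as "UNet2DConditionModel" renders DUMMY_CLASS.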
def create_dummy_files( backend_specific_objects=None ):
    '''simple docstring'''
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # Build the content of one dummy module per backend
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        back_name = '''[''' + ''', '''.join(f'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , back_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    '''simple docstring'''
    dummy_files = create_dummy_files()
    # Special correspondence from backend to the shortcut used in utils/dummy_xxx_objects.py (e.g. torch -> pt)
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path , f'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    f'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    '''to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
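    # Typical invocations, assuming the script lives at utils/check_dummies.py:
    #   python utils/check_dummies.py                      # report inconsistencies
    #   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files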
| 30
|
def heaps( arr: list ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate( k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
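# Illustrative: heaps([1, 2, 3]) returns all 3! = 6 permutations; with this
# implementation the order is (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2),
# (2, 3, 1), (3, 2, 1).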
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 30
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__a = logging.get_logger(__name__)
__a = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[str] = 'layoutlmv3'
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str=5_0_2_6_5 , SCREAMING_SNAKE_CASE_ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : Dict=3_0_7_2 , SCREAMING_SNAKE_CASE_ : int="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1e-5 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : str=0 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Tuple=1_2_8 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2_8 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2_8 , SCREAMING_SNAKE_CASE_ : Tuple=6_4 , SCREAMING_SNAKE_CASE_ : Optional[int]=2_5_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2_2_4 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : List[str]=1_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> str:
super().__init__(
vocab_size=SCREAMING_SNAKE_CASE_ , hidden_size=SCREAMING_SNAKE_CASE_ , num_hidden_layers=SCREAMING_SNAKE_CASE_ , num_attention_heads=SCREAMING_SNAKE_CASE_ , intermediate_size=SCREAMING_SNAKE_CASE_ , hidden_act=SCREAMING_SNAKE_CASE_ , hidden_dropout_prob=SCREAMING_SNAKE_CASE_ , attention_probs_dropout_prob=SCREAMING_SNAKE_CASE_ , max_position_embeddings=SCREAMING_SNAKE_CASE_ , type_vocab_size=SCREAMING_SNAKE_CASE_ , initializer_range=SCREAMING_SNAKE_CASE_ , layer_norm_eps=SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        lowercase_ = max_2d_position_embeddings
lowercase_ = coordinate_size
lowercase_ = shape_size
lowercase_ = has_relative_attention_bias
lowercase_ = rel_pos_bins
lowercase_ = max_rel_pos
lowercase_ = has_spatial_attention_bias
        lowercase_ = rel_2d_pos_bins
        lowercase_ = max_rel_2d_pos
lowercase_ = text_embed
lowercase_ = visual_embed
lowercase_ = input_size
lowercase_ = num_channels
lowercase_ = patch_size
lowercase_ = classifier_dropout
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = version.parse('1.12' )
@property
def _lowercase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def _lowercase ( self : List[str] ) -> float:
return 1e-5
@property
def _lowercase ( self : Any ) -> int:
return 1_2
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : "ProcessorMixin" , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 4_0 , SCREAMING_SNAKE_CASE_ : int = 4_0 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , '''apply_ocr''' , SCREAMING_SNAKE_CASE_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = processor.tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE_ )
lowercase_ = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE_ )
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase_ = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
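        # LayoutLM-family models expect each box as (x0, y0, x1, y1) normalized
        # to a 0-1000 coordinate scale, hence the magnitude of the dummy values.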
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = dict(
processor(
SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ , boxes=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , ) )
return inputs
| 30
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How many images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns does not match the number of images.''' )
    w , h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
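# Illustrative: with four 512x512 images and rows=cols=2, the canvas is
# 1024x1024 and image i is pasted at box ((i % 2) * 512, (i // 2) * 512).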
def generate_images( pipeline , prompt='''robotic cat with wings''' , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker so all generated images are returned unfiltered
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
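# Illustrative run (the script filename is a placeholder):
#   python generate.py -m ./stable-diffusion-v1-5 -c "robotic cat with wings" -n 4 -s 42
# If an INT8 UNet (best_model.pt, produced by Intel Neural Compressor) exists in
# the model directory it is loaded; otherwise the FP32 UNet is moved to CUDA.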
| 30
| 1
|
def is_even( number: int ):
    '''simple docstring'''
    return number & 1 == 0
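# A number is even exactly when its least-significant bit is 0, so the bitwise
# AND with 1 distinguishes the two cases: 4 & 1 == 0 while 7 & 1 == 1.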
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
def merge_sort( collection: list ):
    '''simple docstring'''
    def merge( left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
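# Illustrative: merge_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]; the halving
# plus two-way merge gives the usual O(n log n) runtime, and taking from `left`
# on ties keeps the sort stable.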
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__:
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any]=1_3 , SCREAMING_SNAKE_CASE_ : int=3_0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_7 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : int=2 , ) -> Any:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[int] ) -> str:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
lowercase_ = TFDeiTModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
lowercase_ = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
lowercase_ = self.type_sequence_label_size
lowercase_ = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
a :Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
a :Any = False
a :Optional[int] = False
a :Tuple = False
a :int = False
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = TFDeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def _lowercase ( self : List[str] ) -> str:
pass
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Dense ) )
def _lowercase ( self : Optional[int] ) -> List[str]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> List[str]:
lowercase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self : int ) -> int:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Any ) -> Optional[int]:
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : List[str] ) -> Any:
lowercase_ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' )
# forward pass
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : int=3_2 , SCREAMING_SNAKE_CASE_ : List[Any]=3 , SCREAMING_SNAKE_CASE_ : List[str]=1_0 , SCREAMING_SNAKE_CASE_ : Tuple=[1_0, 2_0, 3_0, 4_0] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[Any]="relu" , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : Any=None , ) -> Optional[int]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = embeddings_size
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_act
lowercase_ = num_labels
lowercase_ = scope
lowercase_ = len(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Tuple:
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Dict ) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
lowercase_ = TFRegNetModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = TFRegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Any ) -> Optional[Any]:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :str = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a :str = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a :Tuple = False
a :List[str] = False
a :Union[str, Any] = False
a :List[Any] = False
a :List[Any] = False
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = TFRegNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowercase ( self : Optional[int] ) -> Any:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _lowercase ( self : Union[str, Any] ) -> int:
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowercase ( self : List[Any] ) -> List[Any]:
pass
def _lowercase ( self : int ) -> int:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> str:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ = layer_type
lowercase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> int:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any={} ):
lowercase_ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Dict ) -> Any:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Any ) -> Any:
lowercase_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' )
# forward pass
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
| 30
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        lowercase_ = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        lowercase_ = tf.keras.layers.Conv2D(
            filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
        lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        lowercase_ = ACT2FN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
        lowercase_ = tf.keras.layers.Conv2D(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
        lowercase_ = tf.keras.layers.GlobalAveragePooling2D(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
        lowercase_ = [
            tf.keras.layers.Conv2D(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.Conv2D(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
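        # Squeeze-and-excitation: the pooled per-channel descriptor is squeezed
        # through a ReLU bottleneck and a sigmoid gate, then rescales each channel.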
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
        lowercase_ = ACT2FN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
        lowercase_ = ACT2FN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , in_channels , out_channels , depth=depth , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
a :str = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
        lowercase_ = tf.keras.layers.GlobalAveragePooling2D(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
        # Change the output format to NCHW to have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = RegNetConfig
a :Any = 'regnet'
a :List[str] = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def a ( ):
'''simple docstring'''
lowercase_ = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
lowercase_ = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(snake_case__ )
DownloadCommand.register_subcommand(snake_case__ )
EnvironmentCommand.register_subcommand(snake_case__ )
RunCommand.register_subcommand(snake_case__ )
ServeCommand.register_subcommand(snake_case__ )
UserCommands.register_subcommand(snake_case__ )
AddNewModelCommand.register_subcommand(snake_case__ )
AddNewModelLikeCommand.register_subcommand(snake_case__ )
LfsCommands.register_subcommand(snake_case__ )
PTtoTFCommand.register_subcommand(snake_case__ )
# Let's go
lowercase_ = parser.parse_args()
if not hasattr(snake_case__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
lowercase_ = args.func(snake_case__ )
service.run()
if __name__ == "__main__":
main()
| 30
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__a = logging.get_logger(__name__)
def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ):
'''simple docstring'''
# Recurse if needed
if "." in tensor_name:
lowercase_ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowercase_ = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
lowercase_ = new_module
lowercase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
lowercase_ = tensor_name in module._buffers
lowercase_ = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put it on {device}.''' )
lowercase_ = False
lowercase_ = False
if is_buffer or not is_bitsandbytes_available():
lowercase_ = False
lowercase_ = False
else:
lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowercase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase_ = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
lowercase_ = value.to('''cpu''' )
if value.dtype == torch.inta:
lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowercase_ = torch.tensor(snake_case__ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
lowercase_ = new_value.T
lowercase_ = old_value.__dict__
if is_abit:
lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
lowercase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
lowercase_ = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
lowercase_ = value.to(snake_case__ )
else:
lowercase_ = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
lowercase_ = new_value
else:
lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
lowercase_ = new_value
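# Hedged mini-example of the dotted-name traversal at the top of
# set_module_quantized_tensor_to_device: every split except the last walks one
# module deeper via getattr, and the final split names the parameter or buffer
# that actually gets replaced.
_splits = "encoder.layer.0.weight".split(".")
assert _splits[:-1] == ["encoder", "layer", "0"] and _splits[-1] == "weight"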
def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
lowercase_ = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
lowercase_ , lowercase_ = module.weight.shape
else:
lowercase_ = module.in_features
lowercase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase_ = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowercase_ = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase_ = True
# Store the module class in case we need to transpose the weight later
lowercase_ = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
lowercase_ , lowercase_ = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
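# Hedged illustration of the modules_to_not_convert check above: a module whose
# dotted path contains any excluded key (e.g. "lm_head") keeps its plain
# nn.Linear and is never swapped for a bitsandbytes layer.
_current_key_name = ["transformer", "lm_head"]
assert any(_key in ".".join(_current_key_name) for _key in ["lm_head"])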
def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ):
'''simple docstring'''
lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowercase_ , lowercase_ = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def a ( *snake_case__: str , **snake_case__: Dict ):
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def a ( *snake_case__: Any , **snake_case__: List[Any] ):
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
lowercase_ = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(snake_case__ , [] )
lowercase_ = len(snake_case__ ) > 0
# Check if it is a base model
lowercase_ = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(snake_case__ ) - set(snake_case__ )
lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
lowercase_ = ['''.weight''', '''.bias''']
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(snake_case__ , '''''' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
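# Hedged mini-example of the ".weight"/".bias" stripping loop above, which
# turns parameter names back into module names for the quantization skip list.
_name = "lm_head.weight"
for _suffix in [".weight", ".bias"]:
    if _suffix in _name:
        _name = _name.replace(_suffix, "")
assert _name == "lm_head"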
| 30
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : str ) -> str:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE_ , )
assert hasattr(self , '''env''' )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
# configuration for running training on smdistributed Model Parallel
lowercase_ = {
'''enabled''': True,
'''processes_per_host''': 8,
}
lowercase_ = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
lowercase_ = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
lowercase_ = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=SCREAMING_SNAKE_CASE_ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE_ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE_ , py_version='''py36''' , )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE_ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
# create estimator
lowercase_ = self.create_estimator(SCREAMING_SNAKE_CASE_ )
# run training
estimator.fit()
# result dataframe
lowercase_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowercase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase_ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE_ )
| 30
|
import argparse
import os
import re
__a = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__a = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def a ( snake_case__: str , snake_case__: bool = False ):
'''simple docstring'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ = f.read()
lowercase_ = content.split('''\n''' )
lowercase_ = []
lowercase_ = 0
while line_idx < len(snake_case__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase_ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(snake_case__ ) )
elif "\n".join(snake_case__ ) != content:
return True
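# Hedged illustration of the sort key above: the identifier pattern pulls the
# first quoted name out of a mapping entry, so blocks end up sorted
# alphabetically by model name.
assert re.search(r'\s*\(\s*"(\S[^"]+)"', '    ("albert", "AlbertConfig"),').groups()[0] == "albert"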
def a ( snake_case__: bool = False ):
'''simple docstring'''
lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )]
lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames]
if not overwrite and any(snake_case__ ):
lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__a = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 30
| 1
|
def a ( snake_case__: int = 1_000 ):
'''simple docstring'''
lowercase_ = 2**power
lowercase_ = 0
while n:
lowercase_ , lowercase_ = r + n % 10, n // 10
return r
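# Hedged worked example of the digit-sum loop above: for power = 15,
# 2**15 = 32768 and the digits sum to 3 + 2 + 7 + 6 + 8 = 26.
assert sum(int(d) for d in str(2**15)) == 26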
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
|
def a ( snake_case__: list , snake_case__: list , snake_case__: int , snake_case__: int , snake_case__: int ):
'''simple docstring'''
if index == number_of_items:
return 0
lowercase_ = 0
lowercase_ = 0
lowercase_ = knapsack(snake_case__ , snake_case__ , snake_case__ , snake_case__ , index + 1 )
if weights[index] <= max_weight:
lowercase_ = values[index] + knapsack(
snake_case__ , snake_case__ , snake_case__ , max_weight - weights[index] , index + 1 )
return max(snake_case__ , snake_case__ )
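# Hedged, self-contained sketch of the recursion above (names are assumptions
# matching the intended signature knapsack(weights, values, number_of_items,
# max_weight, index)): each item is either skipped or taken if it still fits.
def _knapsack_demo(weights, values, number_of_items, max_weight, index=0):
    if index == number_of_items:
        return 0
    ans_without = _knapsack_demo(weights, values, number_of_items, max_weight, index + 1)
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + _knapsack_demo(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_without, ans_with)

# weights (1, 3, 4) with values (1, 4, 5) and capacity 7: best is 3 + 4 -> value 9.
assert _knapsack_demo([1, 3, 4], [1, 4, 5], 3, 7) == 9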
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
def a ( snake_case__: list[int] , snake_case__: str ):
'''simple docstring'''
lowercase_ = int(snake_case__ )
# Initialize Result
lowercase_ = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the denomination to the answer list
return answer
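# Hedged, self-contained sketch of the greedy loop above (names assumed from
# the driver code: find_minimum_change(denominations, value)): repeatedly take
# the largest denomination that still fits.
def _find_minimum_change_demo(denominations, value):
    total_value = int(value)
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer

# 987 in Indian denominations: 500 + 4*100 + 50 + 20 + 10 + 5 + 2.
assert _find_minimum_change_demo([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]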
# Driver Code
if __name__ == "__main__":
__a = []
__a = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
__a = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
__a = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input('Enter the change you want to make: ').strip()
if int(value) <= 0:
print('The total value cannot be zero or negative.')
else:
print(f"Following is minimal change for {value}: ")
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 30
|
import argparse
from collections import defaultdict
import yaml
__a = 'docs/source/en/_toctree.yml'
def a ( snake_case__: Dict ):
'''simple docstring'''
lowercase_ = defaultdict(snake_case__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase_ = [key for key, value in counts.items() if value > 1]
lowercase_ = []
for duplicate_key in duplicates:
lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(snake_case__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(snake_case__ , key=lambda s : s["title"].lower() )
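# Hedged mini-example of the duplicate counting above: two toc entries sharing
# a "local" key are detected and later collapsed into a single entry.
_model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
_counts = defaultdict(int)
for _doc in _model_doc:
    _counts[_doc["local"]] += 1
assert [k for k, v in _counts.items() if v > 1] == ["model_doc/bert"]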
def a ( snake_case__: List[Any]=False ):
'''simple docstring'''
with open(snake_case__ , encoding='''utf-8''' ) as f:
lowercase_ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase_ = content[api_idx]['''sections''']
# Then to the model doc
lowercase_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase_ = api_doc[model_idx]['''sections''']
lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section]
lowercase_ = False
for idx, modality_doc in modalities_docs:
lowercase_ = modality_doc['''sections''']
lowercase_ = clean_model_doc_toc(snake_case__ )
if old_modality_doc != new_modality_doc:
lowercase_ = True
if overwrite:
lowercase_ = new_modality_doc
if diff:
if overwrite:
lowercase_ = model_doc
lowercase_ = api_doc
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 30
| 1
|
def a ( snake_case__: list ):
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
lowercase_ = []
def generate(snake_case__: int , snake_case__: list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowercase_ , lowercase_ = arr[k - 1], arr[i]
else: # k is odd
lowercase_ , lowercase_ = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
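# Hedged, self-contained sketch of Heap's algorithm above: for n items it emits
# exactly n! tuples, the same set itertools.permutations produces.
from itertools import permutations as _permutations

def _heaps_demo(arr):
    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap position i with the last slot
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first element with the last slot
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

assert sorted(_heaps_demo([1, 2, 3])) == sorted(_permutations([1, 2, 3]))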
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 30
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = 'upernet'
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = backbone_config.get('''model_type''' )
lowercase_ = CONFIG_MAPPING[backbone_model_type]
lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = backbone_config
lowercase_ = hidden_size
lowercase_ = initializer_range
lowercase_ = pool_scales
lowercase_ = use_auxiliary_head
lowercase_ = auxiliary_loss_weight
lowercase_ = auxiliary_in_channels
lowercase_ = auxiliary_channels
lowercase_ = auxiliary_num_convs
lowercase_ = auxiliary_concat_input
lowercase_ = loss_ignore_index
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.backbone_config.to_dict()
lowercase_ = self.__class__.model_type
return output
| 30
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
lowercase_ = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self : Tuple ) -> Optional[int]:
lowercase_ = None
lowercase_ = 2_0
lowercase_ = self._get_uniform_logits(batch_size=2 , length=SCREAMING_SNAKE_CASE_ )
# tweak scores to not be uniform anymore
lowercase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase_ = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase_ = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase_ = jax.nn.softmax(temp_dist_warper_sharper(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 )
lowercase_ = jax.nn.softmax(temp_dist_warper_smoother(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self : str ) -> Union[str, Any]:
lowercase_ = None
lowercase_ = 1_0
lowercase_ = 2
# create ramp distribution
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy()
lowercase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase_ = 5
lowercase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, length) ).copy()
lowercase_ = top_k_warp_safety_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self : Dict ) -> List[str]:
lowercase_ = None
lowercase_ = 1_0
lowercase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
lowercase_ = np.exp(top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase_ = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
lowercase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
# check that min length is applied at length 5
lowercase_ = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
lowercase_ = 5
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_5
lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
# check that all scores are -inf except the bos_token_id score
lowercase_ = ids_tensor((batch_size, 1) , vocab_size=2_0 )
lowercase_ = 1
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase_ = 3
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = 5
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase_ = ids_tensor((batch_size, 4) , vocab_size=2_0 )
lowercase_ = 4
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase_ = 3
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : List[Any] ) -> List[str]:
lowercase_ = 4
lowercase_ = 1_0
lowercase_ = 1_5
lowercase_ = 2
lowercase_ = 1
lowercase_ = 1_5
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_0
# no processor list
lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# with processor list
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self : List[str] ) -> int:
lowercase_ = 4
lowercase_ = 1_0
lowercase_ = 1_5
lowercase_ = 2
lowercase_ = 1
lowercase_ = 1_5
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_0
# no processor list
def run_no_processor_list(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
return scores
# with processor list
def run_processor_list(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ):
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
return scores
lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ )
lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ )
lowercase_ = jitted_run_no_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jitted_run_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 30
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__a = 'sshleifer/bart-tiny-random'
__a = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Any ) -> Tuple:
return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
| 30
| 1
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ = 1_0
def _lowercase ( self : int ) -> List[str]:
lowercase_ = [1, 2, 3, 4]
lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> Optional[Any]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = ''''''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
lowercase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = ['''It was the best of times.''']
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = torch.tensor([1, 2, 3, 4] )
lowercase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() )
def _lowercase ( self : List[Any] ) -> Tuple:
lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() )
def _lowercase ( self : int ) -> Dict:
lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() )
def _lowercase ( self : List[str] ) -> Tuple:
lowercase_ = 1_0_1
lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 30
|
def a ( snake_case__: int = 100 ):
'''simple docstring'''
lowercase_ = (n * (n + 1) // 2) ** 2
lowercase_ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
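# Hedged worked example of the closed forms above for n = 10:
# (10*11/2)**2 = 3025, 10*11*21/6 = 385, and the difference is 2640.
assert (10 * 11 // 2) ** 2 - 10 * 11 * 21 // 6 == 2640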
if __name__ == "__main__":
print(f"{solution() = }")
| 30
| 1
|
import sys
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( snake_case__: str = N ):
'''simple docstring'''
lowercase_ = -sys.maxsize - 1
for i in range(len(snake_case__ ) - 12 ):
lowercase_ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
lowercase_ = product
return largest_product
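# Hedged mini-example of the same sliding-window scan: the largest product of
# 3 adjacent digits of "73167" is 1 * 6 * 7 = 42.
_digits = "73167"
assert max(
    int(_digits[i]) * int(_digits[i + 1]) * int(_digits[i + 2])
    for i in range(len(_digits) - 2)
) == 42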
if __name__ == "__main__":
print(f"{solution() = }")
| 30
|
import logging
from transformers.configuration_utils import PretrainedConfig
__a = logging.getLogger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Optional[int] = 'masked_bert'
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = pruning_method
lowercase_ = mask_init
lowercase_ = mask_scale
| 30
| 1
|
import mpmath # for roots of unity
import numpy as np
class lowercase__:
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=None ) -> List[Any]:
# Input as list
lowercase_ = list(poly_a or [0] )[:]
lowercase_ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase_ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase_ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase_ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase_ = self.__multiply()
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
lowercase_ = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return dft[0]
# Cooley-Tukey passes: halve the column count until a single column remains
lowercase_ = self.c_max_length // 2
while next_ncol > 0:
lowercase_ = [[] for i in range(SCREAMING_SNAKE_CASE_ )]
lowercase_ = self.root**next_ncol
# First half of next step
lowercase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE_ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE_ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase_ = new_dft
lowercase_ = next_ncol // 2
return dft[0]
def _lowercase ( self : int ) -> Dict:
lowercase_ = self.__dft('''A''' )
lowercase_ = self.__dft('''B''' )
lowercase_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase_ = 2
while next_ncol <= self.c_max_length:
lowercase_ = [[] for i in range(SCREAMING_SNAKE_CASE_ )]
lowercase_ = self.root ** (next_ncol // 2)
lowercase_ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase_ = new_inverse_c
next_ncol *= 2
# Unpack
lowercase_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : str ) -> Optional[int]:
lowercase_ = '''A = ''' + ''' + '''.join(
f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase_ = '''B = ''' + ''' + '''.join(
f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase_ = '''A*B = ''' + ''' + '''.join(
f'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
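# Hedged cross-check of the product the class computes: multiplying
# A(x) = 1 + 2x by B(x) = 3 + 4x is a coefficient convolution giving
# 3 + 10x + 8x^2, which the FFT-based __multiply should reproduce.
assert list(np.convolve([1, 2], [3, 4])) == [3, 10, 8]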
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
import os
def a ( ):
'''simple docstring'''
lowercase_ = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
with open(snake_case__ ) as file_hand:
return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 30
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
__a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowercase__:
"""simple docstring"""
a :Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
a :Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
a :Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
a :Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} )
a :Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} )
a :Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
a :int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
a :float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
a :Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a :Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowercase ( self : List[str] ) -> Tuple:
lowercase_ = {}
if self.train_dir is not None:
lowercase_ = self.train_dir
if self.validation_dir is not None:
lowercase_ = self.validation_dir
lowercase_ = data_files if data_files else None
@dataclass
class lowercase__:
"""simple docstring"""
a :str = field(
default=UpperCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
a :Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase )} , )
a :Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a :Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
a :Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
a :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a :str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} )
a :bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
a :Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
a :Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
a :Optional[int] = field(
default=UpperCAmelCase , metadata={'help': 'Stride to use for the encoder.'} , )
class lowercase__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[str]=1_9_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : List[str]=0.6 ) -> List[str]:
lowercase_ = input_size
lowercase_ = mask_patch_size
lowercase_ = model_patch_size
lowercase_ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('''Input size must be divisible by mask patch size''' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('''Mask patch size must be divisible by model patch size''' )
lowercase_ = self.input_size // self.mask_patch_size
lowercase_ = self.mask_patch_size // self.model_patch_size
lowercase_ = self.rand_size**2
lowercase_ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : List[str] ) -> List[Any]:
lowercase_ = np.random.permutation(self.token_count )[: self.mask_count]
lowercase_ = np.zeros(self.token_count , dtype=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1
lowercase_ = mask.reshape((self.rand_size, self.rand_size) )
lowercase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
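# Hedged sanity check of the mask geometry above with the class defaults:
# input_size=192 and mask_patch_size=32 give a 6x6 = 36-patch coarse grid,
# ceil(36 * 0.6) = 22 of those patches are masked, and each is upsampled by
# scale = 32 // 4 = 8, so the flattened mask covers 48 * 48 = 2304 tokens.
_rand_size, _scale = 192 // 32, 32 // 4
assert (_rand_size, _scale) == (6, 8)
assert int(np.ceil(_rand_size**2 * 0.6)) == 22
assert (_rand_size * _scale) ** 2 == 2304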
def a ( snake_case__: str ):
'''simple docstring'''
lowercase_ = torch.stack([example['''pixel_values'''] for example in examples] )
lowercase_ = torch.stack([example['''mask'''] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def a ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , '''decoder_type''' ):
        config.decoder_type = '''simmim'''
# adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelForMaskedImageModeling.from_config(config )
if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.6_7, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
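    # Note: this setup implicitly assumes image_size is divisible by mask_patch_size and
    # mask_patch_size is divisible by model_patch_size (e.g. 192 / 32 / 4); other combinations
    # would break the generator's block-upsampling arithmetic.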
    def preprocess_images(examples ):
        examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
        examples['''mask'''] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''masked-image-modeling''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-image-modeling'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 30
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ):
'''simple docstring'''
lowercase_ = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
assert base_extractor.is_extractable(snake_case__ )
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ):
'''simple docstring'''
lowercase_ = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
lowercase_ = input_paths[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
lowercase_ = Extractor.infer_extractor_format(snake_case__ )
assert extractor_format is not None
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(snake_case__ , snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot( tmp_path , text_file ):
    '''simple docstring'''
    import tarfile
    directory = tmp_path / '''data_dot_dot'''
    directory.mkdir()
    path = directory / '''tar_file_with_dot_dot.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.join('''..''' , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    '''simple docstring'''
    import tarfile
    directory = tmp_path / '''data_sym_link'''
    directory.mkdir()
    path = directory / '''tar_file_with_sym_link.tar'''
    os.symlink('''..''' , directory / '''subdir''' , target_is_directory=True )
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(str(directory / '''subdir''' ) , arcname='''subdir''' )  # str required by os.readlink on Windows and Python < 3.8
    return path
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def a ( insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ):
    '''simple docstring'''
    insecure_tar_files = {
        '''tar_file_with_dot_dot''': tar_file_with_dot_dot,
        '''tar_file_with_sym_link''': tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / '''extracted'''
    TarExtractor.extract(input_path , output_path )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a ( tmpdir ):
    '''simple docstring'''
    # We should have fewer false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / '''not_a_zip_file'''
    # From: https://github.com/python/cpython/pull/5053
    data = (
        B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
        B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
        B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
        B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
    )
    with not_a_zip_file.open('''wb''' ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
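# For reference, a minimal sketch of the magic-number idea this test relies on (illustrative
# only, not the library's implementation; `looks_like_zip` is a hypothetical helper):
def looks_like_zip(path) -> bool:
    with open(path, "rb") as f:
        magic = f.read(4)
    # Non-empty ZIP archives start with the local-file-header signature PK\x03\x04
    # (empty archives start with PK\x05\x06 instead, the byte sequence that fools
    # `zipfile.is_zipfile` in the PNG payload above).
    return magic == b"PK\x03\x04"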
| 30
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :str = 'altclip_text_model'
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2_5_0_0_0_2 , SCREAMING_SNAKE_CASE_ : int=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2_4 , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : str="gelu" , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=5_1_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : str=1e-05 , SCREAMING_SNAKE_CASE_ : List[str]=1 , SCREAMING_SNAKE_CASE_ : str=0 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : int="absolute" , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=7_6_8 , **SCREAMING_SNAKE_CASE_ : int , ) -> Any:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = initializer_factor
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = project_dim
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = 'altclip_vision_model'
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Dict=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Optional[int]=2_2_4 , SCREAMING_SNAKE_CASE_ : List[str]=3_2 , SCREAMING_SNAKE_CASE_ : Tuple="quick_gelu" , SCREAMING_SNAKE_CASE_ : Dict=1e-5 , SCREAMING_SNAKE_CASE_ : Dict=0.0 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=1.0 , **SCREAMING_SNAKE_CASE_ : str , ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_size
lowercase_ = intermediate_size
lowercase_ = projection_dim
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = num_channels
lowercase_ = patch_size
lowercase_ = image_size
lowercase_ = initializer_range
lowercase_ = initializer_factor
lowercase_ = attention_dropout
lowercase_ = layer_norm_eps
lowercase_ = hidden_act
@classmethod
def _lowercase ( cls : Dict , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
lowercase_ , lowercase_ = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
lowercase_ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Any = 'altclip'
a :List[str] = True
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Dict=2.65_92 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
        # If `_config_dict`s exist, we use them for backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
lowercase_ = kwargs.pop('''text_config_dict''' , SCREAMING_SNAKE_CASE_ )
lowercase_ = kwargs.pop('''vision_config_dict''' , SCREAMING_SNAKE_CASE_ )
super().__init__(**SCREAMING_SNAKE_CASE_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowercase_ = {}
# This is the complete result when using `text_config_dict`.
lowercase_ = AltCLIPTextConfig(**SCREAMING_SNAKE_CASE_ ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowercase_ = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowercase_ = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overridden.'''
)
logger.warning(SCREAMING_SNAKE_CASE_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowercase_ = {}
# This is the complete result when using `vision_config_dict`.
lowercase_ = AltCLIPVisionConfig(**SCREAMING_SNAKE_CASE_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowercase_ = {
str(SCREAMING_SNAKE_CASE_ ): value for key, value in _vision_config_dict['''id2label'''].items()
}
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowercase_ = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowercase_ = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overridden.'''
)
logger.warning(SCREAMING_SNAKE_CASE_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowercase_ = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
lowercase_ = {}
            logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''' )
lowercase_ = AltCLIPTextConfig(**SCREAMING_SNAKE_CASE_ )
lowercase_ = AltCLIPVisionConfig(**SCREAMING_SNAKE_CASE_ )
lowercase_ = projection_dim
lowercase_ = logit_scale_init_value
lowercase_ = 1.0
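        # Note: 2.6592 is approximately ln(1 / 0.07), the CLIP-style temperature
        # initialization, so exp(logit_scale) starts near 14.29.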
@classmethod
def _lowercase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE_ : AltCLIPTextConfig , SCREAMING_SNAKE_CASE_ : AltCLIPVisionConfig , **SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Dict:
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.text_config.to_dict()
lowercase_ = self.vision_config.to_dict()
lowercase_ = self.__class__.model_type
return output
| 30
|
from __future__ import annotations
def comp_and_swap( array: list[int] , index1: int , index2: int , direction: int ):
    '''simple docstring'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2], array[index1]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int ):
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int ):
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
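# Note: bitonic sort assumes the number of elements is a power of two (the merge step always
# splits ranges in half), so inputs like "3,1,4,2" sort correctly while a 3-element input is
# not guaranteed to come out fully ordered.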
| 30
| 1
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem( AbstractFileSystem ):
    """simple docstring"""
    root_marker = ''''''
    protocol = '''hf-legacy'''  # "hf://" is reserved for hffs
    def __init__( self , repo_info: Optional[DatasetInfo] = None , token: Optional[str] = None , **kwargs , ) -> None:
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path: str , mode: str = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
| 30
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""
    def __init__( self , degree: int , coefficients: MutableSequence[float] ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''' )
        self.coefficients: list[float] = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a: Polynomial ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a: Polynomial ) -> Polynomial:
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self ) -> Polynomial:
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a: Polynomial ) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution: int | float ) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ) -> str:
        polynomial = ''''''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ) -> str:
        return self.__str__()
    def derivative( self ) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant: int | float = 0 ) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a: object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a: object ) -> bool:
        return not self.__eq__(polynomial_a )
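# A quick usage sketch (not part of the original module): coefficients are stored in
# low-to-high order, so Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1.
#
#     p = Polynomial(2, [1, 2, 3])
#     print(p)               # 3x^2 + 2x + 1
#     print(p.evaluate(2))   # 1 + 2*2 + 3*4 = 17
#     print(p.derivative())  # 6x + 2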
| 30
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['DeiTFeatureExtractor']
__a = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
|
import itertools
import math
def is_prime( number: int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth: int = 10_001 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 30
| 1
|
def solution( numerator: int = 3 , denominator: int = 7 , limit: int = 1_000_000 ):
    '''simple docstring'''
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
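# Worked example from the Project Euler 71 statement: for limit=8 the reduced proper fraction
# immediately to the left of 3/7 is 2/5, and indeed solution(3, 7, 8) == 2 (the function
# returns the numerator of that fraction).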
| 30
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 30
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__a = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__a = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__a = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__a = logging.get_logger(__name__)
__a = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]:
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
lowercase_ = self.model.config
else:
lowercase_ = config
lowercase_ = data_args
        lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                ''' padding.''' )
if self.args.label_smoothing == 0:
lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase_ = label_smoothed_nll_loss
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
if self.optimizer is None:
lowercase_ = ['''bias''', '''LayerNorm.weight''']
lowercase_ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
lowercase_ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase_ = Adafactor
lowercase_ = {'''scale_parameter''': False, '''relative_step''': False}
else:
lowercase_ = AdamW
lowercase_ = {
                    '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
'''eps''': self.args.adam_epsilon,
}
lowercase_ = self.args.learning_rate
if self.sharded_ddp:
lowercase_ = OSS(
params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.lr_scheduler is None:
lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
lowercase_ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowercase_ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowercase_ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
return scheduler
def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2]
else:
# compute label smoothed loss
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]:
lowercase_ = inputs.pop('''labels''' )
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return loss
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase_ = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
lowercase_ = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase_ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
        # If PAD token is not defined, at least EOS token has to be defined
lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f''' padded to `max_length`={max_length}''' )
lowercase_ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowercase_ = tensor
return padded_tensor
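# Shape sketch for the padding helper above (an illustrative example, not part of the class):
# a (batch=2, seq_len=5) tensor padded with max_length=8 comes back as a (2, 8) tensor whose
# last three positions in each row hold the pad (or EOS) token id.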
| 30
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ = 1_0
def _lowercase ( self : int ) -> List[str]:
lowercase_ = [1, 2, 3, 4]
lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> Optional[Any]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = ''''''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
lowercase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = ['''It was the best of times.''']
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = torch.tensor([1, 2, 3, 4] )
lowercase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() )
def _lowercase ( self : List[Any] ) -> Tuple:
lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() )
def _lowercase ( self : int ) -> Dict:
lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() )
def _lowercase ( self : List[str] ) -> Tuple:
lowercase_ = 1_0_1
lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 30
| 1
|
def hubble_parameter( hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ):
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be positive''' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('''Relative densities cannot be greater than one''' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
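# The expression above is the standard Friedmann relation (a reference sketch in plain
# notation):
#     H(z) = H_0 * sqrt(Omega_r * (1+z)^4 + Omega_m * (1+z)^3 + Omega_k * (1+z)^2 + Omega_Lambda)
# with the curvature density Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda), exactly as
# computed for `curvature` in the function body.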
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__a = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 30
|
def heaps( arr: list ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i] , arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0] , arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(heaps(arr))
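# Heap's algorithm emits every permutation exactly once, so the result always contains
# factorial(len(arr)) tuples; e.g. heaps([1, 2, 3]) returns 6 tuples, starting with (1, 2, 3).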
| 30
| 1
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = 'data2vec-audio'
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : int=1_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-5 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE_ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE_ : str=(1_0, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_9 , SCREAMING_SNAKE_CASE_ : Tuple=5 , SCREAMING_SNAKE_CASE_ : Dict=0.05 , SCREAMING_SNAKE_CASE_ : List[Any]=1_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : str=1_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE_ : int="sum" , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=2_5_6 , SCREAMING_SNAKE_CASE_ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , SCREAMING_SNAKE_CASE_ : Any=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Any=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : str=1 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_size
lowercase_ = feat_extract_activation
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = conv_bias
lowercase_ = num_conv_pos_embeddings
lowercase_ = num_conv_pos_embedding_groups
lowercase_ = conv_pos_kernel_size
lowercase_ = len(self.conv_dim )
lowercase_ = num_hidden_layers
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = num_attention_heads
lowercase_ = hidden_dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = feat_proj_dropout
lowercase_ = final_dropout
lowercase_ = layerdrop
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = vocab_size
lowercase_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ = mask_time_prob
lowercase_ = mask_time_length
lowercase_ = mask_time_min_masks
lowercase_ = mask_feature_prob
lowercase_ = mask_feature_length
lowercase_ = mask_feature_min_masks
# ctc loss
lowercase_ = ctc_loss_reduction
lowercase_ = ctc_zero_infinity
# adapter
lowercase_ = add_adapter
lowercase_ = adapter_kernel_size
lowercase_ = adapter_stride
lowercase_ = num_adapter_layers
lowercase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
return math.prod(self.conv_stride )
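# Note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above evaluates to
# 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples (20 ms of 16 kHz audio), the
# usual wav2vec 2.0-style feature-extractor hop. (An observation, not part of the original file.)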
| 30
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How many images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w , h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
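# For example (a rough sketch): four 64x64 PIL images with rows=2, cols=2 are pasted onto a
# single 128x128 canvas, filled left-to-right, then top-to-bottom.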
def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ):
'''simple docstring'''
lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ )
lowercase_ = pipeline(
snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images
lowercase_ = int(math.sqrt(snake_case__ ) )
lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
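# Illustrative check (not part of the original script): the square-ish grid
# above only tiles exactly when _rows * (images_num // _rows) == images_num;
# e.g. 4 and 8 images fit, while 7 would make image_grid raise a ValueError.
for _n in (4, 8):
    _r = int(math.sqrt(_n ) )
    assert _r * (_n // _r) == _n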
__a = parse_args()
# Load models and create wrapper for stable diffusion
__a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
__a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
__a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
__a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
__a = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
__a = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
__a = unet.to(torch.device('cuda', args.cuda_id))
__a = pipeline.to(unet.device)
__a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
__a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 30
| 1
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__a = logging.get_logger(__name__)
class lowercase__:
"""simple docstring"""
a :str
a :str = None
@staticmethod
def _lowercase ( ) -> Optional[int]:
raise NotImplementedError
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
raise NotImplementedError
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
raise NotImplementedError
def _lowercase ( self : Tuple ) -> Union[str, Any]:
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def _lowercase ( cls : Dict ) -> Dict:
return f'''`pip install {cls.pip_package or cls.name}`'''
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Any = 'optuna'
@staticmethod
def _lowercase ( ) -> str:
return is_optuna_available()
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> str:
return run_hp_search_optuna(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
return default_hp_space_optuna(SCREAMING_SNAKE_CASE_ )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = 'ray'
a :Optional[int] = '\'ray[tune]\''
@staticmethod
def _lowercase ( ) -> Any:
return is_ray_available()
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Any ) -> str:
return run_hp_search_ray(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
return default_hp_space_ray(SCREAMING_SNAKE_CASE_ )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[str] = 'sigopt'
@staticmethod
def _lowercase ( ) -> Union[str, Any]:
return is_sigopt_available()
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Union[str, Any]:
return run_hp_search_sigopt(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
return default_hp_space_sigopt(SCREAMING_SNAKE_CASE_ )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :str = 'wandb'
@staticmethod
def _lowercase ( ) -> str:
return is_wandb_available()
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return run_hp_search_wandb(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
return default_hp_space_wandb(SCREAMING_SNAKE_CASE_ )
__a = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a ( ):
'''simple docstring'''
lowercase_ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case__ ) > 0:
lowercase_ = available_backends[0].name
if len(snake_case__ ) > 1:
logger.info(
F'''{len(snake_case__ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
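# Illustrative note (not part of the original module): the resolver above walks
# ALL_HYPERPARAMETER_SEARCH_BACKENDS in registration order and returns the first
# installed backend, so optuna wins over ray, sigopt and wandb when several are
# available; with none installed it raises the RuntimeError listing the pip
# command from each backend's pip_install().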
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
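# Illustrative sketch (not part of the original file): _LazyModule defers the
# heavy framework imports declared above until first attribute access, so a
# hypothetical consumer only pays for what it touches:
#
#     import importlib
#     rembert = importlib.import_module('transformers.models.rembert')
#     config_cls = rembert.RemBertConfig  # the real submodule import happens here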
| 30
| 1
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=7 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Any=1_8 , SCREAMING_SNAKE_CASE_ : Tuple=3_0 , SCREAMING_SNAKE_CASE_ : int=4_0_0 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : List[str]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
lowercase_ = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = min_resolution
lowercase_ = max_resolution
lowercase_ = do_resize
lowercase_ = size
lowercase_ = do_normalize
lowercase_ = image_mean
lowercase_ = image_std
def _lowercase ( self : Tuple ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Optional[Any] = DPTImageProcessor if is_vision_available() else None
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = DPTImageProcessingTester(self )
@property
def _lowercase ( self : Any ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : int ) -> Dict:
lowercase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} )
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
def _lowercase ( self : Tuple ) -> List[Any]:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self : str ) -> Optional[int]:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self : List[str] ) -> str:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
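# Illustrative note (not part of the original tests): all three input flavours
# above (PIL, numpy, torch) are resized to the tester's {"height": 18, "width": 18},
# so with the default tester arguments a single image encodes to pixel_values of
# shape (1, 3, 18, 18) and a full batch to (7, 3, 18, 18).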
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['DeiTFeatureExtractor']
__a = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__a = get_logger(__name__)
__a = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class lowercase__:
"""simple docstring"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : str , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase__:
"""simple docstring"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ) -> jnp.ndarray:
for processor in self:
lowercase_ = inspect.signature(processor.__call__ ).parameters
if len(SCREAMING_SNAKE_CASE_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return scores
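# Illustrative usage sketch (class names taken from the un-obfuscated upstream
# module, values hypothetical): processors compose left-to-right in one call.
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     scores = processors(input_ids, scores, cur_len)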
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : float ) -> Any:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
lowercase_ = temperature
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
lowercase_ = scores / self.temperature
return scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float = -float('''Inf''' ) , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Optional[int]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
lowercase_ = top_p
lowercase_ = filter_value
lowercase_ = min_tokens_to_keep
def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
lowercase_ , lowercase_ = lax.top_k(SCREAMING_SNAKE_CASE_ , scores.shape[-1] )
lowercase_ = jnp.full_like(SCREAMING_SNAKE_CASE_ , self.filter_value )
lowercase_ = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ).cumsum(axis=-1 )
lowercase_ = cumulative_probs < self.top_p
# include the token that pushes the cumulative probability above top_p as well
lowercase_ = jnp.roll(SCREAMING_SNAKE_CASE_ , 1 )
score_mask |= score_mask.at[:, 0].set(SCREAMING_SNAKE_CASE_ )
# min tokens to keep
lowercase_ = score_mask.at[:, : self.min_tokens_to_keep].set(SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.where(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jax.lax.sort_key_val(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[-1]
return next_scores
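# Worked example (illustrative, not part of the original module): with
# top_p = 0.9 the sorted cumulative mass below keeps the first two tokens via
# the (cumulative_probs < top_p) mask, and jnp.roll shifts the mask so the
# third token, the one that crosses top_p, is kept as well; only the last
# token is set to filter_value.
_example_cum = jnp.array([0.5, 0.3, 0.15, 0.05] ).cumsum(axis=-1 )
# -> [0.5, 0.8, 0.95, 1.0]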
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = -float('''Inf''' ) , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Union[str, Any]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
lowercase_ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = filter_value
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
lowercase_ , lowercase_ = scores.shape
lowercase_ = jnp.full(batch_size * vocab_size , self.filter_value )
lowercase_ = min(self.top_k , scores.shape[-1] ) # Safety check
lowercase_ , lowercase_ = lax.top_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.broadcast_to((jnp.arange(SCREAMING_SNAKE_CASE_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowercase_ = topk_scores.flatten()
lowercase_ = topk_indices.flatten() + shift
lowercase_ = next_scores_flat.at[topk_indices_flat].set(SCREAMING_SNAKE_CASE_ )
lowercase_ = next_scores_flat.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return next_scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
lowercase_ = bos_token_id
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
lowercase_ = jnp.full(scores.shape , -float('''inf''' ) )
lowercase_ = 1 - jnp.bool_(cur_len - 1 )
lowercase_ = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.bos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ )
return scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Any:
lowercase_ = max_length
lowercase_ = eos_token_id
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
lowercase_ = jnp.full(scores.shape , -float('''inf''' ) )
lowercase_ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowercase_ = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.eos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ )
return scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
lowercase_ = min_length
lowercase_ = eos_token_id
def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
lowercase_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowercase_ = jnp.where(SCREAMING_SNAKE_CASE_ , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ )
return scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> str:
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = begin_index
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
lowercase_ = 1 - jnp.bool_(cur_len - self.begin_index )
lowercase_ = jnp.where(SCREAMING_SNAKE_CASE_ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ )
return scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : list ) -> Tuple:
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
lowercase_ = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> str:
lowercase_ = dict(SCREAMING_SNAKE_CASE_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowercase_ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
lowercase_ = force_token_array.at[index].set(SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.intaa(SCREAMING_SNAKE_CASE_ )
def __call__( self : str , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray:
def _force_token(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowercase_ = scores.shape[0]
lowercase_ = self.force_token_array[generation_idx]
lowercase_ = jnp.ones_like(SCREAMING_SNAKE_CASE_ , dtype=scores.dtype ) * -float('''inf''' )
lowercase_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowercase_ = lax.dynamic_update_slice(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (0, current_token) )
return new_scores
lowercase_ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(SCREAMING_SNAKE_CASE_ ) , lambda: scores , ) , )
return scores
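# Illustrative sketch (token ids are hypothetical, not part of the original
# module): a sparse force_token_map becomes a dense array indexed by generation
# step, with -1 marking steps where the model's own scores pass through the
# lax.cond above untouched.
_example_map = {0: 50258, 2: 50259}
_example_array = jnp.ones(max(_example_map.keys() ) + 1 , dtype=jnp.int32 ) * -1
for _idx, _tok in _example_map.items():
    _example_array = _example_array.at[_idx].set(_tok )
# -> [50258, -1, 50259]: steps 0 and 2 are forced, step 1 samples freely.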
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
lowercase_ = generate_config.eos_token_id
lowercase_ = generate_config.no_timestamps_token_id
lowercase_ = generate_config.no_timestamps_token_id + 1
lowercase_ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(SCREAMING_SNAKE_CASE_ , '''max_initial_timestamp_index''' ):
lowercase_ = generate_config.max_initial_timestamp_index
else:
lowercase_ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowercase_ = model_config.vocab_size
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
# suppress <|notimestamps|> which is handled by without_timestamps
lowercase_ = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
def handle_pairs(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowercase_ = jnp.where((cur_len - self.begin_index) >= 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , SCREAMING_SNAKE_CASE_ , )
lowercase_ = jnp.where((cur_len - self.begin_index) < 2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
return jnp.where(
SCREAMING_SNAKE_CASE_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , SCREAMING_SNAKE_CASE_ , )
lowercase_ = jax.vmap(SCREAMING_SNAKE_CASE_ )(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.where(cur_len == self.begin_index , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , SCREAMING_SNAKE_CASE_ , )
lowercase_ = self.timestamp_begin + self.max_initial_timestamp_index
lowercase_ = jnp.where(
SCREAMING_SNAKE_CASE_ , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowercase_ = jax.nn.log_softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
def handle_cumulative_probs(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ):
lowercase_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowercase_ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ , )
lowercase_ = jax.vmap(SCREAMING_SNAKE_CASE_ )(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return scores
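# Illustrative note (not part of the original module): the vmapped rules above
# enforce Whisper's timestamp grammar per batch row: after a lone timestamp the
# next token may not be ordinary text (text logits are masked), after a
# timestamp pair no further timestamp may follow (timestamp logits are masked),
# and whenever the summed log-probability over timestamp tokens exceeds the
# best text token, text is masked so a timestamp is sampled.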
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = 'RegNetConfig'
# Base docstring
__a = 'facebook/regnet-y-040'
__a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__a = 'facebook/regnet-y-040'
__a = 'tabby, tabby cat'
__a = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
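# Illustrative note (not part of the original file): the explicit ZeroPadding2D
# with kernel_size // 2 followed by a VALID convolution reproduces 'SAME'-style
# padding deterministically for odd kernels (e.g. kernel_size = 3 pads one
# pixel on every side), keeping strided convolutions aligned with the reference
# PyTorch implementation.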
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
lowercase_ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
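# Shape walkthrough (illustrative, not part of the original file): for an input
# of shape (batch, h, w, c) the pooler yields (batch, 1, 1, c), the 1x1 relu
# convolution squeezes to reduced_channels, the 1x1 sigmoid convolution expands
# back to c, and the final multiply rescales each channel of the full-resolution
# feature map by its learned gate.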
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
a :str = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
# Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = RegNetConfig
a :Any = 'regnet'
a :List[str] = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
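# Illustrative usage sketch (not part of the original file; the checkpoint name
# comes from the docstring constants above, class names from the un-obfuscated
# upstream module):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained('facebook/regnet-y-040')
#     model = TFRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
#     inputs = processor(images=image, return_tensors='tf')
#     predicted = int(tf.argmax(model(**inputs).logits, axis=-1)[0])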
| 30
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = 'RegNetConfig'
# Base docstring
__a = 'facebook/regnet-y-040'
__a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__a = 'facebook/regnet-y-040'
__a = 'tabby, tabby cat'
__a = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
lowercase_ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
a :str = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = RegNetConfig
a :Any = 'regnet'
a :List[str] = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
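# --- Hedged usage sketch (not part of the module above) ---
# Minimal example of driving the classification head end to end through the
# public transformers API. The checkpoint id is an assumption based on the
# published RegNet weights, not something stated in this file.
if __name__ == "__main__":
    import tensorflow as tf
    from transformers import TFRegNetForImageClassification

    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, matching the serving signature above
    logits = model(pixel_values).logits
    print(int(tf.math.argmax(logits, axis=-1)[0]))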
| 30
| 1
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase__:
"""simple docstring"""
def _lowercase ( self : int ) -> Dict:
torch.manual_seed(0 )
lowercase_ = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase_ = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
lowercase_ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self : str ) -> int:
torch.manual_seed(0 )
lowercase_ = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase_ = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
lowercase_ = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase_ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self : str ) -> Tuple:
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = inputs['''prompt''']
lowercase_ = inputs['''generator''']
lowercase_ = inputs['''num_inference_steps''']
lowercase_ = inputs['''output_type''']
if "image" in inputs:
lowercase_ = inputs['''image''']
else:
lowercase_ = None
if "mask_image" in inputs:
lowercase_ = inputs['''mask_image''']
else:
lowercase_ = None
if "original_image" in inputs:
lowercase_ = inputs['''original_image''']
else:
lowercase_ = None
lowercase_ , lowercase_ = pipe.encode_prompt(SCREAMING_SNAKE_CASE_ )
# inputs with prompt converted to embeddings
lowercase_ = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
lowercase_ = image
if mask_image is not None:
lowercase_ = mask_image
if original_image is not None:
lowercase_ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
pipe_loaded.to(SCREAMING_SNAKE_CASE_ )
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = inputs['''generator''']
lowercase_ = inputs['''num_inference_steps''']
lowercase_ = inputs['''output_type''']
# inputs with prompt converted to embeddings
lowercase_ = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
lowercase_ = image
if mask_image is not None:
lowercase_ = mask_image
if original_image is not None:
lowercase_ = original_image
lowercase_ = pipe_loaded(**SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = np.abs(to_np(SCREAMING_SNAKE_CASE_ ) - to_np(SCREAMING_SNAKE_CASE_ ) ).max()
self.assertLess(SCREAMING_SNAKE_CASE_ , 1e-4 )
def _lowercase ( self : Tuple ) -> Any:
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
pipe_loaded.to(SCREAMING_SNAKE_CASE_ )
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe_loaded(**SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = np.abs(to_np(SCREAMING_SNAKE_CASE_ ) - to_np(SCREAMING_SNAKE_CASE_ ) ).max()
self.assertLess(SCREAMING_SNAKE_CASE_ , 1e-4 )
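# --- Hedged sketch of the save/load round trip the tests above exercise ---
# The tiny pipeline id is an assumption (an hf-internal-testing checkpoint
# commonly used for fast tests), not something stated in this file.
if __name__ == "__main__":
    import tempfile
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe")
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        pipe_loaded = DiffusionPipeline.from_pretrained(tmpdir)  # components survive the round trip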
| 30
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__a = logging.get_logger(__name__)
def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ):
'''simple docstring'''
# Recurse if needed
if "." in tensor_name:
lowercase_ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowercase_ = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
lowercase_ = new_module
lowercase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
lowercase_ = tensor_name in module._buffers
lowercase_ = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put it on {device}.''' )
    # Distinguish the two quantized-parameter flavours; the obfuscated dump
    # collapsed both flags into one name, which made the branches below dead.
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
lowercase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase_ = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
lowercase_ = value.to('''cpu''' )
if value.dtype == torch.inta:
lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowercase_ = torch.tensor(snake_case__ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
lowercase_ = new_value.T
lowercase_ = old_value.__dict__
            if is_8bit:
                lowercase_ = bnb.nn.Int8Params(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            elif is_4bit:
                lowercase_ = bnb.nn.Params4bit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
lowercase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
lowercase_ = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
lowercase_ = value.to(snake_case__ )
else:
lowercase_ = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
lowercase_ = new_value
else:
lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
lowercase_ = new_value
def a ( snake_case__: str , snake_case__: Union[str, Any]=None , snake_case__: Any=None , snake_case__: List[str]=None , snake_case__: Optional[Any]=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
lowercase_ = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
lowercase_ , lowercase_ = module.weight.shape
else:
lowercase_ = module.in_features
lowercase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase_ = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowercase_ = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase_ = True
# Store the module class in case we need to transpose the weight later
lowercase_ = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
lowercase_ , lowercase_ = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ):
'''simple docstring'''
lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowercase_ , lowercase_ = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def a ( *snake_case__: str , **snake_case__: Dict ):
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def a ( *snake_case__: Any , **snake_case__: List[Any] ):
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
    lowercase_ = deepcopy(snake_case__ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
lowercase_ = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(snake_case__ , [] )
lowercase_ = len(snake_case__ ) > 0
# Check if it is a base model
lowercase_ = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(snake_case__ ) - set(snake_case__ )
lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
lowercase_ = ['''.weight''', '''.bias''']
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(snake_case__ , '''''' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
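# --- Hedged usage sketch ---
# The replacement helpers above are normally reached through `from_pretrained`
# rather than called directly. The model id is an assumption, and running this
# needs a CUDA-enabled bitsandbytes install.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(load_in_8bit=True)
    model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)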
| 30
| 1
|
def a ( number: int ):
    '''simple docstring'''
    # Position of the most significant set bit, i.e. int.bit_length()
    if not isinstance(number , int ):
        raise TypeError('''Input value must be an \'int\' type''' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
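if __name__ == "__main__":
    # Sanity check for the helper above (kept under its obfuscated name `a`):
    # counting right shifts until the value is exhausted is exactly int.bit_length().
    for n in (1, 2, 5, 32, 255):
        assert a(n) == n.bit_length()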
| 30
|
import argparse
import os
import re
__a = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__a = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def a ( snake_case__: str , snake_case__: bool = False ):
'''simple docstring'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ = f.read()
lowercase_ = content.split('''\n''' )
lowercase_ = []
lowercase_ = 0
while line_idx < len(snake_case__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase_ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(snake_case__ ) )
elif "\n".join(snake_case__ ) != content:
return True
def a ( snake_case__: bool = False ):
'''simple docstring'''
lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )]
lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames]
if not overwrite and any(snake_case__ ):
lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__a = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
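# Self-contained spot check of the two patterns above (re-declared locally
# because the module-level names were collapsed to `__a` in this dump):
if __name__ == "__main__":
    _intro = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
    _ident = re.compile(r'\s*\(\s*"(\S[^"]+)"')
    assert _intro.search("MODEL_MAPPING_NAMES = OrderedDict") is not None
    assert _ident.search('    ("albert", "AlbertConfig"),').groups()[0] == "albert"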
| 30
| 1
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__a = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__a = [0, 2_5, 5_0]
__a = [2_5, 5_0, 7_5]
__a = fuzz.membership.trimf(X, abca)
__a = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__a = np.ones(7_5)
__a = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
__a = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__a = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
__a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__a = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__a = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__a = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__a = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
__a = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 30
|
def knapsack ( weights: list , values: list , number_of_items: int , max_weight: int , index: int ):
    '''simple docstring'''
    if index == number_of_items:
        return 0
    # Best value when the current item is skipped
    ans_without = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without , ans_with )
if __name__ == "__main__":
import doctest
doctest.testmod()
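if __name__ == "__main__":
    # Worked example for the recursive knapsack above: with weights (1, 3, 4, 5),
    # values (1, 4, 5, 7) and capacity 7, the best value is 9 (items of weight 3 and 4).
    assert knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0) == 9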
| 30
| 1
|
import string
import numpy
def a ( snake_case__: int , snake_case__: int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , snake_case__ )
class lowercase__:
"""simple docstring"""
a :Tuple = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
a :str = numpy.vectorize(lambda UpperCAmelCase : x % 36 )
a :int = numpy.vectorize(UpperCAmelCase )
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : numpy.ndarray ) -> None:
lowercase_ = self.modulus(SCREAMING_SNAKE_CASE_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowercase_ = encrypt_key.shape[0]
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : str ) -> int:
return self.key_string.index(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> str:
return self.key_string[round(SCREAMING_SNAKE_CASE_ )]
def _lowercase ( self : Optional[Any] ) -> None:
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = len(self.key_string )
if greatest_common_divisor(SCREAMING_SNAKE_CASE_ , len(self.key_string ) ) != 1:
lowercase_ = (
f'''determinant modular {req_l} of encryption key({det}) '''
            f'''is not coprime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = [char for char in text.upper() if char in self.key_string]
lowercase_ = chars[-1]
while len(SCREAMING_SNAKE_CASE_ ) % self.break_key != 0:
chars.append(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = self.process_text(text.upper() )
lowercase_ = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(SCREAMING_SNAKE_CASE_ ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(self.encrypt_key.dot(SCREAMING_SNAKE_CASE_ ) ).T.tolist()[
0
]
lowercase_ = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _lowercase ( self : Union[str, Any] ) -> numpy.ndarray:
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowercase_ = i
break
lowercase_ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = self.make_decrypt_key()
lowercase_ = self.process_text(text.upper() )
lowercase_ = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(SCREAMING_SNAKE_CASE_ ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(decrypt_key.dot(SCREAMING_SNAKE_CASE_ ) ).T.tolist()[0]
lowercase_ = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def a ( ):
'''simple docstring'''
lowercase_ = int(input('''Enter the order of the encryption key: ''' ) )
lowercase_ = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(snake_case__ ):
lowercase_ = [int(snake_case__ ) for x in input().split()]
hill_matrix.append(snake_case__ )
lowercase_ = HillCipher(numpy.array(snake_case__ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
lowercase_ = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
lowercase_ = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(snake_case__ ) )
elif option == "2":
lowercase_ = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
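# --- Hedged round-trip sketch ---
# Assuming the constructor stores `encrypt_key` and `break_key` as in the
# upstream implementation (the obfuscated assignments above drop those
# attribute names), the cipher is used like this; note that process_text
# pads the plaintext, so decryption may carry a trailing padding character:
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))  # det = 7, coprime with 36
#     cipher_text = hc.encrypt('testing hill cipher')
#     assert hc.decrypt(cipher_text).startswith('TESTINGHILLCIPHER')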
| 30
|
import argparse
from collections import defaultdict
import yaml
__a = 'docs/source/en/_toctree.yml'
def a ( snake_case__: Dict ):
'''simple docstring'''
lowercase_ = defaultdict(snake_case__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase_ = [key for key, value in counts.items() if value > 1]
lowercase_ = []
for duplicate_key in duplicates:
lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(snake_case__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() )
def a ( snake_case__: List[Any]=False ):
'''simple docstring'''
with open(snake_case__ , encoding='''utf-8''' ) as f:
lowercase_ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase_ = content[api_idx]['''sections''']
# Then to the model doc
lowercase_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase_ = api_doc[model_idx]['''sections''']
lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section]
lowercase_ = False
for idx, modality_doc in modalities_docs:
lowercase_ = modality_doc['''sections''']
lowercase_ = clean_model_doc_toc(snake_case__ )
if old_modality_doc != new_modality_doc:
lowercase_ = True
if overwrite:
lowercase_ = new_modality_doc
if diff:
if overwrite:
lowercase_ = model_doc
lowercase_ = api_doc
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
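# Behavior contract of clean_model_doc_toc, shown on hypothetical data:
# duplicate `local` entries sharing one title are collapsed, then everything
# is sorted by title, case-insensitively:
#     in:  [{"local": "model_doc/bert", "title": "BERT"},
#           {"local": "model_doc/albert", "title": "ALBERT"},
#           {"local": "model_doc/bert", "title": "BERT"}]
#     out: [{"local": "model_doc/albert", "title": "ALBERT"},
#           {"local": "model_doc/bert", "title": "BERT"}]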
| 30
| 1
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple=0.01 , SCREAMING_SNAKE_CASE_ : int=1_0_0_0 ) -> Tuple:
lowercase_ = p_stop
lowercase_ = max_length
def __iter__( self : List[Any] ) -> Dict:
lowercase_ = 0
lowercase_ = False
while not stop and count < self.max_length:
yield count
count += 1
lowercase_ = random.random() < self.p_stop
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Dict=True ) -> List[str]:
lowercase_ = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
lowercase_ = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Optional[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple ) -> List[str]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> List[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple ) -> str:
lowercase_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
lowercase_ = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Any:
random.seed(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
for i in range(SCREAMING_SNAKE_CASE_ )
]
lowercase_ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE_ )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowercase_ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
lowercase_ = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = 4_2
lowercase_ = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Edge case with a very small dataset
lowercase_ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def _lowercase ( self : Any ) -> Any:
lowercase_ = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def _lowercase ( self : Any ) -> Optional[int]:
lowercase_ = DataLoader(list(range(1_6 ) ) , batch_size=4 )
lowercase_ = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def _lowercase ( self : Any ) -> Optional[int]:
lowercase_ = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _lowercase ( self : Any ) -> Any:
Accelerator()
lowercase_ = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
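def _skip_first_batches_sketch():
    # Added sketch (not part of the test suite above): `skip_first_batches`
    # wraps an existing dataloader so iteration resumes `num_batches` in,
    # which is the mechanism behind mid-epoch checkpoint resumption.
    dataloader = DataLoader(list(range(16)), batch_size=4)
    resumed = skip_first_batches(dataloader, num_batches=2)
    assert [t.tolist() for t in resumed] == [[8, 9, 10, 11], [12, 13, 14, 15]]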
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = 'upernet'
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = backbone_config.get('''model_type''' )
lowercase_ = CONFIG_MAPPING[backbone_model_type]
lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = backbone_config
lowercase_ = hidden_size
lowercase_ = initializer_range
lowercase_ = pool_scales
lowercase_ = use_auxiliary_head
lowercase_ = auxiliary_loss_weight
lowercase_ = auxiliary_in_channels
lowercase_ = auxiliary_channels
lowercase_ = auxiliary_num_convs
lowercase_ = auxiliary_concat_input
lowercase_ = loss_ignore_index
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.backbone_config.to_dict()
lowercase_ = self.__class__.model_type
return output
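# Added sketch (hypothetical usage; the attribute assignments above are
# name-mangled to `lowercase_`, so this mirrors the upstream UperNetConfig
# behaviour rather than this exact text): with no backbone the config falls
# back to ResNet, and a plain `model_type` dict is resolved via CONFIG_MAPPING.
def _upernet_config_sketch(config_cls):
    default_config = config_cls()
    assert default_config.backbone_config.model_type == '''resnet'''
    from_dict_config = config_cls(backbone_config={'''model_type''': '''convnext'''})
    assert from_dict_config.backbone_config.model_type == '''convnext'''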
import os
def solution():
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
    print(solution())
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__a = 'sshleifer/bart-tiny-random'
__a = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Any ) -> Tuple:
return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
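def _make_student_sketch():
    # Added sketch (assumes the same tiny public checkpoint the tests use):
    # copying alternating layers produces a 1-encoder/1-decoder student whose
    # weights are initialized from the teacher's layers.
    student, *_ = create_student_by_copying_alternating_layers(
        '''sshleifer/bart-tiny-random''', tempfile.mkdtemp(), e=1, d=1)
    assert (student.config.encoder_layers, student.config.decoder_layers) == (1, 1)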
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ) -> str:
lowercase_ = tempfile.mkdtemp()
# fmt: off
lowercase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase_ = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
lowercase_ = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowercase_ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Dict ) -> Optional[int]:
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_image_processor()
lowercase_ = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
lowercase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase_ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
lowercase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.prepare_image_inputs()
lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
lowercase_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self : List[str] ) -> Optional[int]:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''lower newer'''
lowercase_ = processor(text=SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : List[Any] ) -> Optional[int]:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''lower newer'''
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
processor()
def _lowercase ( self : Tuple ) -> Optional[Any]:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
lowercase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowercase_ = '''lower newer'''
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
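def _processor_usage_sketch(processor, image):
    # Added sketch (standalone, mirroring the tests above): one processor call
    # tokenizes the text and preprocesses the image in a single batch.
    inputs = processor(text='''lower newer''', images=image, return_tensors='''np''')
    return inputs['''input_ids'''], inputs['''pixel_values''']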
def solution(n: int = 100):
    '''simple docstring'''
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
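    # Added worked check: for n = 10, (55)**2 - 385 = 3025 - 385 = 2640.
    assert solution(10) == 2640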
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        self.coefficients = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)
    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ''''''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
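if __name__ == "__main__":
    # Added demonstration (not in the original module):
    p = Polynomial(2, [0, 0, 1])  # x^2
    q = Polynomial(1, [2, 1])     # x + 2
    print(p + q)            # 1x^2 + 1x + 2
    print(p.derivative())   # 2x
    print(p.evaluate(3))    # 9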
import logging
from transformers.configuration_utils import PretrainedConfig
__a = logging.getLogger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Optional[int] = 'masked_bert'
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1e-12 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Dict="constant" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = pruning_method
lowercase_ = mask_init
lowercase_ = mask_scale
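# Added note (hypothetical usage): relative to a plain BERT config, the extra
# `pruning_method` / `mask_init` / `mask_scale` fields drive movement-pruning
# fine-tuning, e.g. pruning_method='''topK''' keeps the top-scoring weights
# per layer when the learned mask is binarized.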
def xnor_gate(input_a: int, input_b: int):
    '''simple docstring'''
    return 1 if input_a == input_b else 0
def test_xnor_gate():
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'''
def _lowercase ( self : List[str] ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=(4, 4, 6_4, 6_4) , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ )
return image
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Any="CompVis/stable-diffusion-v1-4" ) -> str:
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = '''bf16''' if fpaa else None
lowercase_ , lowercase_ = FlaxUNetaDConditionModel.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='''unet''' , dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ )
return model, params
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Dict=(4, 7_7, 7_6_8) , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Dict:
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> int:
lowercase_ , lowercase_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_latents(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = model.apply(
{'''params''': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
assert sample.shape == latents.shape
lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> Dict:
lowercase_ , lowercase_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_latents(SCREAMING_SNAKE_CASE_ , shape=(4, 4, 9_6, 9_6) , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , shape=(4, 7_7, 1_0_2_4) , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = model.apply(
{'''params''': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
assert sample.shape == latents.shape
lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ):
'''simple docstring'''
lowercase_ = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
assert base_extractor.is_extractable(snake_case__ )
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ):
'''simple docstring'''
lowercase_ = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
lowercase_ = input_paths[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
lowercase_ = Extractor.infer_extractor_format(snake_case__ )
assert extractor_format is not None
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(snake_case__ , snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ):
'''simple docstring'''
import tarfile
lowercase_ = tmp_path / '''data_dot_dot'''
directory.mkdir()
lowercase_ = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def a ( snake_case__: int ):
'''simple docstring'''
import tarfile
lowercase_ = tmp_path / '''data_sym_link'''
directory.mkdir()
lowercase_ = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ )
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
lowercase_ = insecure_tar_files[insecure_tar_file]
lowercase_ = tmp_path / '''extracted'''
TarExtractor.extract(snake_case__ , snake_case__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowercase_ = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
lowercase_ = (
B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(snake_case__ )
assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
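# Added note: `zipfile.is_zipfile` scans the file for an end-of-central-directory
# record (the bytes PK\x05\x06), which the PNG payload above happens to contain,
# hence the false positive; checking only the leading magic number avoids it.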
import argparse
import os
import re
__a = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__a = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def a ( snake_case__: str , snake_case__: bool = False ):
'''simple docstring'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ = f.read()
lowercase_ = content.split('''\n''' )
lowercase_ = []
lowercase_ = 0
while line_idx < len(snake_case__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase_ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(snake_case__ ) )
elif "\n".join(snake_case__ ) != content:
return True
def a ( snake_case__: bool = False ):
'''simple docstring'''
lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )]
lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames]
if not overwrite and any(snake_case__ ):
lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__a = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
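# Added illustrative example of the rewrite this script performs (names are
# hypothetical; sorting keys on the first quoted identifier, per _re_identifier):
#   before: ("roberta", "RobertaModel"), ("albert", "AlbertModel")
#   after:  ("albert", "AlbertModel"), ("roberta", "RobertaModel")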
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int):
    '''simple docstring'''
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge(array: list[int], low: int, length: int, direction: int):
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int):
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
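def _bitonic_demo():
    # Added sketch: the bitonic network assumes the input length is a power
    # of two; direction 1 sorts ascending, 0 descending.
    data = [3, 7, 4, 8, 6, 2, 1, 5]
    bitonic_sort(data, 0, len(data), 1)
    assert data == [1, 2, 3, 4, 5, 6, 7, 8]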
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Tuple ) -> Any:
lowercase_ = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
lowercase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase_ = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
lowercase_ = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
lowercase_ = model(input_ids.to(SCREAMING_SNAKE_CASE_ ) , labels=labels.to(SCREAMING_SNAKE_CASE_ ) ).loss
lowercase_ = -(labels.shape[-1] * loss.item())
lowercase_ = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
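# Added note: the score above rescales the mean cross-entropy back to a total
# log-likelihood (the loss is averaged over the label length), which is
# presumably the convention EXPECTED_SCORE was computed with.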
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['LayoutLMv3FeatureExtractor']
__a = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
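# Added note: with _LazyModule, importing this package stays cheap; the heavy
# torch/tf/vision submodules listed in _import_structure are only loaded on
# first attribute access (e.g. touching a tokenizer class), not at import time.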
import itertools
import math
def is_prime(number: int):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10_001):
    '''simple docstring'''
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"{solution() = }")
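    # Added sanity check: the 6th prime is 13.
    assert solution(6) == 13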
from ....utils import logging
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : List[str]=2_0_4_8 ) -> Dict:
lowercase_ = config.__dict__
lowercase_ = modal_hidden_size
if num_labels:
lowercase_ = num_labels
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None
class CircularLinkedList:
    """simple docstring"""
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('''list index out of range.''')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('''list index out of range.''')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
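    test_circular_linked_list()  # added: also exercise the list end-to-end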
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__a = logging.get_logger(__name__)
__a = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]:
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config is None:
assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
lowercase_ = self.model.config
else:
lowercase_ = config
lowercase_ = data_args
lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                ''' padding.''' )
if self.args.label_smoothing == 0:
lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase_ = label_smoothed_nll_loss
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
if self.optimizer is None:
lowercase_ = ['''bias''', '''LayerNorm.weight''']
lowercase_ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
lowercase_ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase_ = Adafactor
lowercase_ = {'''scale_parameter''': False, '''relative_step''': False}
else:
lowercase_ = AdamW
lowercase_ = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
lowercase_ = self.args.learning_rate
if self.sharded_ddp:
lowercase_ = OSS(
params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.lr_scheduler is None:
lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
lowercase_ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowercase_ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowercase_ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
return scheduler
def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model
lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2]
else:
# compute label smoothed loss
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]:
lowercase_ = inputs.pop('''labels''' )
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return loss
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase_ = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
lowercase_ = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase_ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
        # If the PAD token is not defined, at least the EOS token has to be defined
lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f''' padded to `max_length`={max_length}''' )
lowercase_ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowercase_ = tensor
return padded_tensor
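def _pad_tensors_sketch():
    # Added standalone sketch of what the padding helper above is meant to do
    # (mirroring the upstream implementation; the assignments in the method
    # body are name-mangled): copy a short tensor into a pad-filled tensor.
    pad_token_id, max_length = 0, 5
    tensor = torch.ones(2, 3, dtype=torch.long)
    padded_tensor = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
    padded_tensor[:, : tensor.shape[-1]] = tensor
    assert padded_tensor.tolist() == [[1, 1, 1, 0, 0], [1, 1, 1, 0, 0]]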
from ..utils import DummyObject, requires_backends
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :str = ['flax']
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :List[Any] = ['flax']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :int = ['flax']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Any ) -> Any:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :str = ['flax']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : str , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Tuple = ['flax']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :List[Any] = ['flax']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Dict = ['flax']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Any = ['flax']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :str = ['flax']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ) -> str:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Any = ['flax']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Dict = ['flax']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :Dict = ['flax']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class lowercase__( metaclass=UpperCAmelCase ):
"""simple docstring"""
a :List[Any] = ['flax']
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
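# The dummy classes above exist so that `from diffusers import <FlaxClass>` keeps working
# when jax/flax is not installed, failing lazily with a clear error on first use. A hedged
# minimal sketch of what such a requires_backends check could look like (illustrative
# only; the real helper also knows about version pins and per-backend install hints):
import importlib.util

def _requires_backends_sketch(obj, backends):
    name = getattr(obj, '__name__', type(obj).__name__)
    missing = [backend for backend in backends if importlib.util.find_spec(backend) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")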
| 30
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ = 1_0
def _lowercase ( self : int ) -> List[str]:
lowercase_ = [1, 2, 3, 4]
lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> Optional[Any]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = ''''''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
lowercase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = ['''It was the best of times.''']
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = torch.tensor([1, 2, 3, 4] )
lowercase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() )
def _lowercase ( self : List[Any] ) -> Tuple:
lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() )
def _lowercase ( self : int ) -> Dict:
lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() )
def _lowercase ( self : List[str] ) -> Tuple:
lowercase_ = 1_0_1
lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
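# The assertions above fully pin down the helpers they exercise, but utils_summarization
# itself is not shown in this file. A minimal sketch consistent with those tests follows;
# it is an illustrative reconstruction, not necessarily the module's actual implementation.
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    # Keep at most block_size tokens, then right-pad with pad_token_id up to block_size.
    sequence = list(sequence)[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

def _build_mask_sketch(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the padding token appears (matches the tests above).
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask

assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert _truncate_or_pad_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))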
| 30
| 1
|
def a ( snake_case__: list[int] , snake_case__: list[int] ):
'''simple docstring'''
lowercase_ = len(snake_case__ )
print('''The following activities are selected:''' )
# The first activity is always selected
lowercase_ = 0
print(snake_case__ , end=''',''' )
# Consider rest of the activities
for j in range(snake_case__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case__ , end=''',''' )
lowercase_ = j
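# The greedy selection above assumes the activities are already sorted by finish time
# (the sample input below satisfies this). A hedged variant that sorts first and returns
# the chosen indices instead of printing -- an illustrative addition, not original code:
def select_max_activities(start: list, finish: list) -> list:
    if not finish:
        return []
    order = sorted(range(len(finish)), key=lambda idx: finish[idx])
    selected = [order[0]]
    last_finish = finish[order[0]]
    for idx in order[1:]:
        # Keep an activity only if it starts no earlier than the last selected one finishes.
        if start[idx] >= last_finish:
            selected.append(idx)
            last_finish = finish[idx]
    return selected

# Example with the same data as below: indices 0, 1, 3 and 4 are mutually compatible.
assert select_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]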
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = [1, 3, 0, 5, 8, 5]
__a = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 30
|
def a ( snake_case__: list ):
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
lowercase_ = []
def generate(snake_case__: int , snake_case__: list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowercase_ , lowercase_ = arr[k - 1], arr[i]
else: # k is odd
lowercase_ , lowercase_ = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
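# A quick sanity check for Heap's algorithm above: it should generate exactly n! distinct
# permutations. Illustrative addition (uses the public name `heaps`, as in the __main__
# hook below):
import math

def _check_heaps(n: int) -> None:
    perms = heaps(list(range(n)))
    assert len(perms) == math.factorial(n)
    assert len(set(perms)) == len(perms)

_check_heaps(4)  # 24 distinct permutations of 4 elements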
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 30
| 1
|
import math
def a ( snake_case__: int ):
'''simple docstring'''
lowercase_ = [True] * n
lowercase_ = False
lowercase_ = False
lowercase_ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
lowercase_ = i * 2
while index < n:
lowercase_ = False
lowercase_ = index + i
lowercase_ = [2]
for i in range(3 , snake_case__ , 2 ):
if is_prime[i]:
primes.append(snake_case__ )
return primes
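# An illustrative spot check of the sieve above (added example; uses the public name
# `prime_sieve`, as at its call site below): the primes strictly below 30.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]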
def a ( snake_case__: int = 999_966_663_333 ):
'''simple docstring'''
lowercase_ = math.floor(math.sqrt(snake_case__ ) ) + 100
lowercase_ = prime_sieve(snake_case__ )
lowercase_ = 0
lowercase_ = 0
lowercase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowercase_ = primes[prime_index + 1]
lowercase_ = last_prime**2
lowercase_ = next_prime**2
# Get numbers divisible by lps(current)
lowercase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowercase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowercase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowercase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 30
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def a ( ):
'''simple docstring'''
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
        '''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How many images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''CUDA device id to run on.''' , )
lowercase_ = parser.parse_args()
return args
def a ( snake_case__: Optional[Any] , snake_case__: Tuple , snake_case__: Union[str, Any] ):
'''simple docstring'''
if not len(snake_case__ ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
lowercase_ , lowercase_ = imgs[0].size
lowercase_ = Image.new('''RGB''' , size=(cols * w, rows * h) )
lowercase_ , lowercase_ = grid.size
for i, img in enumerate(snake_case__ ):
grid.paste(snake_case__ , box=(i % cols * w, i // cols * h) )
return grid
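# Illustrative sanity check for the grid helper above (an added example, not part of the
# original script; uses the public name `image_grid`, as at its call site below): pasting
# four 64x64 images into a 2x2 grid yields a 128x128 canvas.
_demo_imgs = [Image.new('RGB', (64, 64)) for _ in range(4)]
assert image_grid(_demo_imgs, rows=2, cols=2).size == (128, 128)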
def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ):
'''simple docstring'''
lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ )
lowercase_ = pipeline(
snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images
lowercase_ = int(math.sqrt(snake_case__ ) )
lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__a = parse_args()
# Load models and create wrapper for stable diffusion
__a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
__a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
__a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
__a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
__a = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
__a = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
__a = unet.to(torch.device('cuda', args.cuda_id))
__a = pipeline.to(unet.device)
__a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
__a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 30
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[str] = 'encodec'
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , SCREAMING_SNAKE_CASE_ : Dict=2_4_0_0_0 , SCREAMING_SNAKE_CASE_ : List[Any]=1 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=1_2_8 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : str=1 , SCREAMING_SNAKE_CASE_ : Dict=[8, 5, 4, 2] , SCREAMING_SNAKE_CASE_ : Optional[Any]="weight_norm" , SCREAMING_SNAKE_CASE_ : str=7 , SCREAMING_SNAKE_CASE_ : Tuple=7 , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str="reflect" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : str=1.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , **SCREAMING_SNAKE_CASE_ : Dict , ) -> int:
lowercase_ = target_bandwidths
lowercase_ = sampling_rate
lowercase_ = audio_channels
lowercase_ = normalize
lowercase_ = chunk_length_s
lowercase_ = overlap
lowercase_ = hidden_size
lowercase_ = num_filters
lowercase_ = num_residual_layers
lowercase_ = upsampling_ratios
lowercase_ = norm_type
lowercase_ = kernel_size
lowercase_ = last_kernel_size
lowercase_ = residual_kernel_size
lowercase_ = dilation_growth_rate
lowercase_ = use_causal_conv
lowercase_ = pad_mode
lowercase_ = compress
lowercase_ = num_lstm_layers
lowercase_ = trim_right_ratio
lowercase_ = codebook_size
lowercase_ = codebook_dim if codebook_dim is not None else hidden_size
lowercase_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def _lowercase ( self : List[str] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _lowercase ( self : str ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _lowercase ( self : Optional[int] ) -> int:
lowercase_ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _lowercase ( self : str ) -> int:
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
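# An illustrative usage sketch of the derived properties above (added example). It assumes
# the class's public name is EncodecConfig (matching `model_type = 'encodec'`) and the
# property names referenced inside the class body (chunk_length, frame_rate). With the
# 24 kHz defaults, the hop length is prod(upsampling_ratios) = 8 * 5 * 4 * 2 = 320, so
# frame_rate = ceil(24000 / 320) = 75; with chunk_length_s left as None, chunk_length is None.
config = EncodecConfig()
assert config.chunk_length is None
assert config.frame_rate == 75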
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
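# The _LazyModule registration above defers the heavy torch/tokenizer imports until an
# attribute is first accessed. A hedged minimal sketch of the idea (illustrative; the real
# transformers _LazyModule also handles __dir__, pickling and error reporting):
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        module = importlib.import_module('.' + self._attr_to_submodule[attr], self.__name__)
        return getattr(module, attr)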
| 30
| 1
|
def a ( snake_case__: list ):
'''simple docstring'''
for i in range(len(snake_case__ ) - 1 , 0 , -1 ):
lowercase_ = False
for j in range(snake_case__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ = unsorted[j - 1], unsorted[j]
lowercase_ = True
for j in range(snake_case__ ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ = unsorted[j + 1], unsorted[j]
lowercase_ = True
if not swapped:
break
return unsorted
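# An illustrative property check for the bidirectional bubble sort above (added example;
# uses the public name `cocktail_shaker_sort`, as in the __main__ hook below): the result
# should agree with Python's built-in sorted() on arbitrary data.
import random

_sample = [random.randint(-100, 100) for _ in range(50)]
assert cocktail_shaker_sort(list(_sample)) == sorted(_sample)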
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['DeiTFeatureExtractor']
__a = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowercase ( self : str ) -> List[Any]:
lowercase_ = self.dummy_uncond_unet
lowercase_ = KarrasVeScheduler()
lowercase_ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' , return_dict=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : int ) -> int:
lowercase_ = '''google/ncsnpp-celebahq-256'''
lowercase_ = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowercase_ = KarrasVeScheduler()
lowercase_ = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
lowercase_ = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
def a ( snake_case__: int = 100 ):
'''simple docstring'''
lowercase_ = (n * (n + 1) // 2) ** 2
lowercase_ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
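# An illustrative brute-force check of the closed forms above (added example; uses the
# public name `solution`, as in the __main__ hook below): for small n, directly computing
# (sum i)^2 - sum(i^2) should agree with the formula-based result.
def _brute_force(n: int) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert all(solution(n) == _brute_force(n) for n in (1, 10, 100))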
if __name__ == "__main__":
print(f"{solution() = }")
| 30
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = 'RegNetConfig'
# Base docstring
__a = 'facebook/regnet-y-040'
__a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__a = 'facebook/regnet-y-040'
__a = 'tabby, tabby cat'
__a = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
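# TFRegNetConvLayer pairs ZeroPadding2D(kernel_size // 2) with a VALID convolution to
# emulate PyTorch-style symmetric padding. An illustrative arithmetic check of the
# resulting spatial size (added example; pure Python, no TF call needed):
def _padded_valid_output_size(size: int, kernel_size: int, stride: int) -> int:
    padded = size + 2 * (kernel_size // 2)
    return (padded - kernel_size) // stride + 1

# For odd kernels at stride 1 the spatial size is preserved (224 -> 224 with k=3),
# and halved as expected at stride 2 (224 -> 112).
assert _padded_valid_output_size(224, kernel_size=3, stride=1) == 224
assert _padded_valid_output_size(224, kernel_size=3, stride=2) == 112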
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
lowercase_ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
a :str = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = RegNetConfig
a :Any = 'regnet'
a :List[str] = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'''
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
__a = r'''
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__:
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int]=1_3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_0 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : str=3_7 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ) -> Union[str, Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 1
def _lowercase ( self : str ) -> Any:
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : List[Any] ) -> Optional[Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
lowercase_ = ViTMSNModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict:
lowercase_ = self.type_sequence_label_size
lowercase_ = ViTMSNForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        print(f'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
        print(f'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = ViTMSNForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Optional[Any] ) -> str:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Optional[int] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
a :Any = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
a :int = False
a :Optional[Any] = False
a :Dict = False
a :List[str] = False
def _lowercase ( self : Any ) -> Optional[Any]:
lowercase_ = ViTMSNModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def _lowercase ( self : List[Any] ) -> Dict:
pass
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = ViTMSNModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def a ( ):
'''simple docstring'''
lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[Any] ) -> List[Any]:
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def _lowercase ( self : Tuple ) -> Dict:
torch.manual_seed(2 )
lowercase_ = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 30
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__a = logging.get_logger(__name__)
def a ( snake_case__: Optional[int] , snake_case__: Dict , snake_case__: int , snake_case__: List[str]=None , snake_case__: List[Any]=None ):
'''simple docstring'''
# Recurse if needed
if "." in tensor_name:
lowercase_ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowercase_ = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
lowercase_ = new_module
lowercase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
lowercase_ = tensor_name in module._buffers
lowercase_ = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
lowercase_ = False
lowercase_ = False
if is_buffer or not is_bitsandbytes_available():
lowercase_ = False
lowercase_ = False
else:
lowercase_ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowercase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase_ = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
lowercase_ = value.to('''cpu''' )
if value.dtype == torch.inta:
lowercase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowercase_ = torch.tensor(snake_case__ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
lowercase_ = new_value.T
lowercase_ = old_value.__dict__
if is_abit:
lowercase_ = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
lowercase_ = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
lowercase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
lowercase_ = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
lowercase_ = value.to(snake_case__ )
else:
lowercase_ = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
lowercase_ = new_value
else:
lowercase_ = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
lowercase_ = new_value
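# The function above resolves dotted tensor names ("encoder.layer.0.weight") by walking
# getattr through submodules before swapping in the quantized parameter. A hedged
# standalone sketch of that traversal (illustrative, not the exact code above):
from functools import reduce

def _resolve_dotted(module, dotted_name):
    # Returns (parent_module, leaf_name) for a dotted parameter/buffer path.
    *parents, leaf = dotted_name.split('.')
    return reduce(getattr, parents, module), leaf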
def _replace_with_bnb_linear(
    model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    '''Recursively replaces `nn.Linear` (and GPT-2 style `Conv1D`) modules with bitsandbytes quantized linears.'''
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    '''Public entry point: wraps `_replace_with_bnb_linear` and warns when nothing was converted.'''
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
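# Hedged usage sketch for the entry point above (`bnb_config` is assumed to be a
# quantization config exposing `quantization_method()` and the `llm_int8_*` /
# `bnb_4bit_*` attributes read in `_replace_with_bnb_linear`):
#
#   import torch.nn as nn
#   model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
#   model = replace_with_bnb_linear(model, quantization_config=bnb_config)
#
# Every `nn.Linear` (and GPT-2 style `Conv1D`) not listed in `modules_to_not_convert`
# is swapped for `bnb.nn.Linear8bitLt` or `bnb.nn.Linear4bit`, depending on the config.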
def replace_8bit_linear(*args , **kwargs ):
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )


def set_module_8bit_tensor_to_device(*args , **kwargs ):
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert(model ):
    '''Returns the module names (e.g. a tied output head) that should be kept in full precision.'''
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
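# Intent, illustrated on a hypothetical model: for a causal LM whose `lm_head.weight`
# is tied to the input embeddings, this returns something like ["lm_head"], so the
# output head is kept in full precision rather than being quantized.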
| 30
| 1
|
from itertools import product


def total_frequency_distribution(sides_number: int , dice_number: int ) -> list:
    '''Counts, for every possible total, how many rolls of `dice_number` dice produce it.'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    '''Returns the probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice).'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
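# Quick sanity check of the helper above (hypothetical invocation): with two four-sided
# dice, total_frequency_distribution(sides_number=4, dice_number=2) returns
# [0, 0, 1, 2, 3, 4, 3, 2, 1] -- the totals 2..8 occur 1, 2, 3, 4, 3, 2 and 1 times.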
| 30
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str , overwrite: bool = False ):
    '''Sorts one auto-mapping file by model identifier; returns True if it needed sorting.'''
    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            ''' this.''' )
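# Illustration of the identifier-based sort (hypothetical mapping entries):
#   blocks = ['        ("bert", "BertConfig"),', '        ("albert", "AlbertConfig"),']
#   sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
# puts the "albert" entry first, i.e. one-line entries are ordered by the quoted
# model-type string, and multi-line "(...)" blocks are sorted the same way.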
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 30
| 1
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name ):
    '''Builds a MobileNetV1 config whose depth multiplier and image size are parsed from the model name.'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError('''Quantized models are not supported.''' )
    matches = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = '''background'''
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
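# Example of the name parsing above: "mobilenet_v1_0.75_192" matches the regex with
# groups ("0.75", "192"), so config.depth_multiplier == 0.75 and config.image_size == 192.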
def prepare_img():
    '''Fetches the standard COCO cats image used to verify the converted model.'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''Copies the TensorFlow MobileNetV1 weights into the 🤗 model structure and verifies the outputs.'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1_001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        repo_id = '''google/''' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 30
|
def knapsack(weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    '''
    Recursive 0/1 knapsack: the maximum value achievable from items[index:] within max_weight.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    '''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
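# Note: the plain recursion above explores both the "skip" and "take" branch for every
# item, so it runs in O(2**n). A memoized variant (a sketch; only `max_weight` and
# `index` change across calls, so they form the cache key) could look like:
#
#   from functools import lru_cache
#   def knapsack_memo(weights, values, number_of_items, max_weight):
#       @lru_cache(maxsize=None)
#       def go(remaining, index):
#           if index == number_of_items:
#               return 0
#           best = go(remaining, index + 1)
#           if weights[index] <= remaining:
#               best = max(best, values[index] + go(remaining - weights[index], index + 1))
#           return best
#       return go(max_weight, 0)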
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self ):
        return 3_2

    @property
    def time_input_dim(self ):
        return 3_2

    @property
    def block_out_channels_a(self ):
        return self.time_input_dim

    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self ):
        return 1_0_0

    @property
    def dummy_tokenizer(self ):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_image_encoder(self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
        return CLIPVisionModelWithProjection(config )
    @property
    def dummy_text_proj(self ):
        torch.manual_seed(0 )
        model_kwargs = {
            '''clip_embeddings_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''cross_attention_dim''': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs )
        return model

    @property
    def dummy_decoder(self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_super_res_kwargs(self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first(self ):
        torch.manual_seed(0 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def dummy_super_res_last(self ):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def get_dummy_components(self ):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
        feature_extractor = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs(self , device , seed=0 , pil_image=True ):
        input_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
lowercase_ = '''cpu'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )
lowercase_ = output.images
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : str ) -> Optional[Any]:
lowercase_ = '''cpu'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )
lowercase_ = output.images
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : str ) -> int:
lowercase_ = '''cpu'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )
lowercase_ = output.images
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
lowercase_ = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
lowercase_ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = torch.device('''cpu''' )
        class DummyScheduler:
            init_noise_sigma = 1
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
lowercase_ = pipe.decoder.dtype
lowercase_ = 1
lowercase_ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowercase_ = pipe.prepare_latents(
SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler() )
lowercase_ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowercase_ = pipe.prepare_latents(
SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler() )
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowercase_ = pipe(
**SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ ).images
lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
# Don't pass image, instead pass embedding
lowercase_ = pipeline_inputs.pop('''image''' )
lowercase_ = pipe.image_encoder(SCREAMING_SNAKE_CASE_ ).image_embeds
lowercase_ = pipe(
**SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ , image_embeddings=SCREAMING_SNAKE_CASE_ , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _lowercase ( self : Tuple ) -> List[str]:
lowercase_ = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowercase_ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=SCREAMING_SNAKE_CASE_ )
@skip_mps
def _lowercase ( self : List[str] ) -> str:
lowercase_ = torch_device == '''cpu'''
lowercase_ = True
lowercase_ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , )
def _lowercase ( self : Optional[int] ) -> Tuple:
lowercase_ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowercase_ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ )
@skip_mps
def _lowercase ( self : Optional[int] ) -> Tuple:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase ( self : str ) -> Union[str, Any]:
return super().test_save_load_local()
@skip_mps
def _lowercase ( self : str ) -> List[str]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            '''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipeline(
            input_image , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert_mean_pixel_difference(image , expected_image , 1_5 )
| 30
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc(model_doc ):
    '''Merges duplicate entries in the model toc and returns it sorted by title.'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc['''local''']] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s['''title'''].lower() )
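# Example (hypothetical toc fragment): two entries sharing a `local` key,
#   [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"}]
# collapse into a single {"local": "bert", "title": "BERT"} entry, and the cleaned list
# comes back sorted case-insensitively by title.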
def check_model_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 30
| 1
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier ):
    '''Splits a camel-cased identifier into its component words.'''
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]
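# Example: camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]. The first
# lookaround pair splits at a lowercase-to-uppercase boundary, the second after an
# acronym run such as "TF".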
def get_frameworks_table():
    '''Builds a DataFrame flagging, per model type, its PT/TF/Flax support and preferred processor.'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'''model_type''': all_models}
    data['''pytorch'''] = [pt_models[t] for t in all_models]
    data['''tensorflow'''] = [tf_models[t] for t in all_models]
    data['''flax'''] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = '''AutoTokenizer'''
    data['''processor'''] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table(table ):
    '''Adds the (pipeline tag, auto class) pair for every model class found in the auto mappings.'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
        auto_classes = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata(token , commit_sha ):
    '''Regenerates the frameworks and pipeline-tags tables and pushes them to the metadata dataset.'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , '''pipeline_tags.json''' ) )
        if commit_sha is not None:
            commit_message = (
                F'''Update with commit {commit_sha}\n\nSee: '''
                F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
            )
        else:
            commit_message = '''Update'''
        upload_folder(
            repo_id='''huggingface/transformers-metadata''' , folder_path=tmp_dir , repo_type='''dataset''' , token=token , commit_message=commit_message , )
def check_pipeline_tags():
    '''Checks every supported pipeline task is covered by `PIPELINE_TAGS_AND_AUTO_MODELS`.'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['''pt''']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ''', '''.join(missing )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 30
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig ):
    model_type = '''upernet'''

    def __init__(
        self , backbone_config=None , hidden_size=5_1_2 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=3_8_4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
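# Usage sketch (assumes a backbone type registered in CONFIG_MAPPING):
#   config = UperNetConfig(backbone_config={"model_type": "resnet"})
#   config.to_dict()  # re-serializes the nested backbone config under "backbone_config"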
| 30
| 1
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__a = logging.get_logger(__name__)
__a = 'T5Config'
class TFMTaModel(TFTaModel ):
    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration ):
    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel ):
    model_type = '''mt5'''
    config_class = MTaConfig
| 30
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def teacher_config(self ):
        return AutoConfig.from_pretrained(TINY_BART )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
| 30
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig ):
    model_type = '''timesformer'''

    def __init__(
        self , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , num_frames=8 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act='''gelu''' , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type='''divided_space_time''' , drop_path_rate=0 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 30
|
def solution(n: int = 100 ) -> int:
    '''Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers.'''
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
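# Worked check for n = 10: the square of the sum is 55**2 == 3025 and the sum of the
# squares is 10 * 11 * 21 // 6 == 385, so solution(10) == 2640.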
if __name__ == "__main__":
print(f"{solution() = }")
| 30
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''

    def get_tokenizer(self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features(self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings(self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 3_5
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
        tmp_npz_file = os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(tmp_npz_file , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmp_npz_file )
        processed_voice_preset = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer(self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
|
import logging
from transformers.configuration_utils import PretrainedConfig
__a = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig ):
    model_type = '''masked_bert'''

    def __init__(
        self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method='''topK''' , mask_init='''constant''' , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 30
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__a = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__(
        self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_5_5 , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_flip_channel_order: bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PIL.Image.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale(self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def flip_channel_order(self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        return flip_channel_order(image , data_format=data_format )
    def preprocess(self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_flip_channel_order: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self , outputs , target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
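# Hedged usage sketch for the post-processing above (names as defined in this class):
#   seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
# returns one H x W tensor of class indices per image, bilinearly resized to each
# requested target size; without `target_sizes`, the argmax is taken at the logits'
# own resolution.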
| 30
|
import os
def solution():
    '''Returns the first ten digits of the sum of the numbers listed in num.txt.'''
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 30
| 1
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ):
'''simple docstring'''
lowercase_ = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
assert base_extractor.is_extractable(snake_case__ )
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ):
'''simple docstring'''
lowercase_ = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
lowercase_ = input_paths[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
lowercase_ = Extractor.infer_extractor_format(snake_case__ )
assert extractor_format is not None
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(snake_case__ , snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ):
'''simple docstring'''
import tarfile
lowercase_ = tmp_path / '''data_dot_dot'''
directory.mkdir()
lowercase_ = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def a ( snake_case__: int ):
'''simple docstring'''
import tarfile
lowercase_ = tmp_path / '''data_sym_link'''
directory.mkdir()
lowercase_ = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ )
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
lowercase_ = insecure_tar_files[insecure_tar_file]
lowercase_ = tmp_path / '''extracted'''
TarExtractor.extract(snake_case__ , snake_case__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
    # We should have fewer false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
lowercase_ = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
lowercase_ = (
B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(snake_case__ )
assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
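    # Background (sketch): the crafted PNG above embeds the bytes b"PK\x05\x06"
    # (the ZIP end-of-central-directory magic), which zipfile.is_zipfile scans
    # for anywhere in the file, while the header-only check used by
    # ZipExtractor compares the magic number at the very start of the file
    # (here b"\x89PNG...") and is therefore not fooled.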
| 30
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: int , snake_case__: Tuple , snake_case__: Dict , snake_case__: Dict , snake_case__: List[Any] , snake_case__: int , snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: int , ):
'''simple docstring'''
lowercase_ = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
lowercase_ , lowercase_ = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
assert base_extractor.is_extractable(snake_case__ )
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( snake_case__: List[Any] , snake_case__: int , snake_case__: Optional[int] , snake_case__: Union[str, Any] , snake_case__: List[Any] , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: Union[str, Any] , snake_case__: Tuple , snake_case__: int , snake_case__: Optional[int] , ):
'''simple docstring'''
lowercase_ = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
lowercase_ = input_paths[compression_format]
if input_path is None:
lowercase_ = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
lowercase_ = Extractor.infer_extractor_format(snake_case__ )
assert extractor_format is not None
lowercase_ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(snake_case__ , snake_case__ , snake_case__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ = file_path.read_text(encoding='''utf-8''' )
else:
lowercase_ = output_path.read_text(encoding='''utf-8''' )
lowercase_ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a ( snake_case__: Union[str, Any] , snake_case__: List[Any] ):
'''simple docstring'''
import tarfile
lowercase_ = tmp_path / '''data_dot_dot'''
directory.mkdir()
lowercase_ = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(snake_case__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def a ( snake_case__: int ):
'''simple docstring'''
import tarfile
lowercase_ = tmp_path / '''data_sym_link'''
directory.mkdir()
lowercase_ = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case__ )
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: List[str] , snake_case__: List[str] , snake_case__: int , snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
lowercase_ = insecure_tar_files[insecure_tar_file]
lowercase_ = tmp_path / '''extracted'''
TarExtractor.extract(snake_case__ , snake_case__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
    # We should have fewer false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
lowercase_ = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
lowercase_ = (
B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(snake_case__ )
assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
| 30
| 1
|
class lowercase__: # Public class to implement a graph
"""simple docstring"""
    def __init__( self , row: int , col: int , graph: list[list[bool]] ) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i: int , j: int , visited: list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i: int , j: int , visited: list[list[bool]] ) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
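# Example (a minimal sketch): two 8-connected islands of 1s in a 3x4 grid.
if __name__ == "__main__":
    example_graph = [
        [1, 1, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 1],
    ]
    print(lowercase__(3 , 4 , example_graph ).count_islands() )  # -> 2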
| 30
|
from __future__ import annotations
def comp_and_swap( array: list[int] , indexa: int , indexb: int , direction: int ):
    '''simple docstring'''
    if (direction == 1 and array[indexa] > array[indexb]) or (
        direction == 0 and array[indexa] < array[indexb]
    ):
        array[indexa] , array[indexb] = array[indexb] , array[indexa]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int ):
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int ):
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 30
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word: str ):
    '''simple docstring'''
    return "".join(sorted(word ) )
def anagram( my_word: str ):
    '''simple docstring'''
    return word_by_signature[signature(my_word )]
__a = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__a = sorted({word.strip().lower() for word in data.splitlines()})
__a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
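# Example (sketch, assuming "words.txt" holds a standard English word list):
# signature("stop") == "opst", so anagram("stop") returns every word in the
# list sharing that signature, e.g. ["opts", "post", "pots", "spot", "stop", "tops"].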
| 30
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""
    def __init__( self , degree: int , coefficients: MutableSequence[float] ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''' )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a: Polynomial ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a: Polynomial ) -> Polynomial:
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self ) -> Polynomial:
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a: Polynomial ) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution: int | float ) -> int | float:
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ) -> str:
        polynomial = ''''''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ) -> str:
        return self.__str__()
    def derivative( self ) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant: int | float = 0 ) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a: object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a: object ) -> bool:
        return not self.__eq__(polynomial_a )
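# Usage sketch for the class above:
if __name__ == "__main__":
    p = Polynomial(2 , [1, 2, 1] )  # 1 + 2x + x^2
    print(p )               # 1x^2 + 2x + 1
    print(p.evaluate(3 ) )  # 16
    print(p.derivative() )  # 2x + 2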
| 30
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['PerceiverFeatureExtractor']
__a = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
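# Note: the _LazyModule indirection above keeps `import transformers` cheap;
# the torch- and vision-backed submodules listed in _import_structure are only
# imported when one of their attributes (e.g. PerceiverModel) is first
# accessed, while the TYPE_CHECKING branch exists purely for static analyzers.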
| 30
|
import itertools
import math
def is_prime( number: int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth: int = 10_001 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 30
| 1
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 30
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 30
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = dataset
lowercase_ = process
lowercase_ = params
def __len__( self : Optional[Any] ) -> Tuple:
return len(self.dataset )
def __getitem__( self : str , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
lowercase_ = self.dataset[i]
lowercase_ = self.process(SCREAMING_SNAKE_CASE_ , **self.params )
return processed
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> int:
lowercase_ = loader
lowercase_ = infer
lowercase_ = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowercase_ = None
lowercase_ = loader_batch_size
# Internal bookkeeping
lowercase_ = None
lowercase_ = None
def __len__( self : Optional[int] ) -> int:
return len(self.loader )
def __iter__( self : List[str] ) -> int:
lowercase_ = iter(self.loader )
return self
def _lowercase ( self : Optional[Any] ) -> Dict:
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowercase_ = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowercase_ = {}
for k, element in self._loader_batch_data.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# Convert ModelOutput to tuple first
lowercase_ = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
lowercase_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowercase_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
lowercase_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowercase_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowercase_ = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowercase_ = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowercase_ = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowercase_ = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowercase_ = self._loader_batch_data.__class__(SCREAMING_SNAKE_CASE_ )
self._loader_batch_index += 1
return result
def _lowercase ( self : int ) -> Dict:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowercase_ = next(self.iterator )
lowercase_ = self.infer(SCREAMING_SNAKE_CASE_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
lowercase_ = processed
else:
lowercase_ = list(processed.keys() )[0]
lowercase_ = processed[key]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = len(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowercase_ = observed_batch_size
# Setting internal index to unwrap the batch
lowercase_ = processed
lowercase_ = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any=None ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __iter__( self : Tuple ) -> Optional[Any]:
lowercase_ = iter(self.loader )
lowercase_ = None
return self
def _lowercase ( self : int ) -> str:
if self.subiterator is None:
lowercase_ = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
lowercase_ = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
lowercase_ = self.infer(next(self.iterator ) , **self.params )
lowercase_ = next(self.subiterator )
return processed
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __iter__( self : Tuple ) -> Any:
lowercase_ = iter(self.loader )
return self
def _lowercase ( self : List[Any] ) -> str:
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` marker and then just passes it on to the caller.
lowercase_ = False
lowercase_ = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowercase_ = self.loader_batch_item()
lowercase_ = item.pop('''is_last''' )
accumulator.append(SCREAMING_SNAKE_CASE_ )
if is_last:
return accumulator
while not is_last:
lowercase_ = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
lowercase_ = processed
else:
lowercase_ = list(processed.keys() )[0]
lowercase_ = processed[key]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = len(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowercase_ = observed_batch_size
lowercase_ = processed
lowercase_ = 0
while self._loader_batch_index < self.loader_batch_size:
lowercase_ = self.loader_batch_item()
lowercase_ = item.pop('''is_last''' )
accumulator.append(SCREAMING_SNAKE_CASE_ )
if is_last:
return accumulator
else:
lowercase_ = processed
lowercase_ = item.pop('''is_last''' )
accumulator.append(SCREAMING_SNAKE_CASE_ )
return accumulator
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dataset , SCREAMING_SNAKE_CASE_ : str ) -> int:
lowercase_ = dataset
lowercase_ = key
def __len__( self : Optional[Any] ) -> Optional[int]:
return len(self.dataset )
def __getitem__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
return self.dataset[i][self.key]
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Dataset , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = dataset
lowercase_ = keya
lowercase_ = keya
def __len__( self : Tuple ) -> List[str]:
return len(self.dataset )
def __getitem__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 30
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__a = logging.get_logger(__name__)
__a = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]:
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config is None:
assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
lowercase_ = self.model.config
else:
lowercase_ = config
lowercase_ = data_args
lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                ''' padding.''' )
if self.args.label_smoothing == 0:
lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase_ = label_smoothed_nll_loss
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
if self.optimizer is None:
lowercase_ = ['''bias''', '''LayerNorm.weight''']
lowercase_ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
lowercase_ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase_ = Adafactor
lowercase_ = {'''scale_parameter''': False, '''relative_step''': False}
else:
lowercase_ = AdamW
lowercase_ = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
lowercase_ = self.args.learning_rate
if self.sharded_ddp:
lowercase_ = OSS(
params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.lr_scheduler is None:
lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
lowercase_ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowercase_ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowercase_ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
return scheduler
def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2]
else:
# compute label smoothed loss
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]:
lowercase_ = inputs.pop('''labels''' )
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return loss
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase_ = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
lowercase_ = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase_ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
# If PAD token is not defined at least EOS token has to be defined
lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f''' padded to `max_length`={max_length}''' )
lowercase_ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowercase_ = tensor
return padded_tensor
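# Minimal sketch of the padding helper above, run in isolation (assumption:
# the pad_token_id of 0 and the right-padded layout are illustrative choices):
import torch
tensor = torch.tensor([[5, 6, 7]] )
max_length , pad_token_id = 5 , 0
padded_tensor = pad_token_id * torch.ones((tensor.shape[0], max_length) , dtype=tensor.dtype )
padded_tensor[:, : tensor.shape[-1]] = tensor
print(padded_tensor )  # tensor([[5, 6, 7, 0, 0]])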
| 30
| 1
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number: int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( n: int = 2_000_000 ):
    '''simple docstring'''
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 30
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ = 1_0
def _lowercase ( self : int ) -> List[str]:
lowercase_ = [1, 2, 3, 4]
lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> Optional[Any]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = ''''''
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [] )
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ )
lowercase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = ['''It was the best of times.''']
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = torch.tensor([1, 2, 3, 4] )
lowercase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() )
def _lowercase ( self : List[Any] ) -> Tuple:
lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() )
def _lowercase ( self : int ) -> Dict:
lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() )
def _lowercase ( self : List[str] ) -> Tuple:
lowercase_ = 1_0_1
lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 30
| 1
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
def get_all_tweets( screen_name: str ):
    '''simple docstring'''
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
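    # Note: the four credential constants at the top of this script must hold
    # real Twitter API keys before running; tweepy raises an authentication
    # error otherwise.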
| 30
|
def heaps( arr: list ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[k - 1] , arr[i] = arr[i] , arr[k - 1]
            else:  # k is odd
                arr[k - 1] , arr[0] = arr[0] , arr[k - 1]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(heaps(arr))
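# For example, heaps([1, 2, 3]) returns all 3! = 6 permutations in Heap's
# algorithm order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]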
| 30
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How many images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w , h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)  # bypass the safety checker
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid , images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
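# Example invocation (sketch; the script name and model directory are illustrative):
#   python run_int8_sd.py -m ./stable-diffusion-model -c "robotic cat with wings" -n 4 -s 42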
| 30
| 1
|
import functools
def mincost_tickets( days: list[int] , costs: list[int] ):
    '''simple docstring'''
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 366:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
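# Example (sketch): mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) returns 11,
# i.e. a 1-day pass on day 1, a 7-day pass covering days 4 through 10, and a
# 1-day pass on day 20 (2 + 7 + 2).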
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
__a = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline; `images` holds the rendered views of the generated 3D asset."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating a latent representation of a 3D asset from an input image and rendering it."""

    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
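    # Shape sketch for the latents produced above (values illustrative): the caller
    # passes a flat shape (batch_size, num_embeddings * embedding_dim) and reshapes
    # afterwards, e.g. with hypothetical num_embeddings=1024, embedding_dim=1024:
    #   (2, 1024 * 1024) --reshape--> (2, 1024, 1024)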
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f'cuda:{gpu_id}')

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device('meta') or not hasattr(self.image_encoder, '_hf_hook'):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
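    # Batch-layout sketch for classifier-free guidance (shapes illustrative):
    #   image_embeds          (B, N, D)  conditional CLIP features
    #   negative_image_embeds (B, N, D)  zeros, the unconditional branch
    #   torch.cat([...])      (2B, N, D) so one prior forward pass covers both;
    # __call__ later undoes this with noise_pred.chunk(2) and combines the halves
    # as noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond).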
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}'
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == 'latent':
            return ShapEPipelineOutput(images=latents)

        images = []
        for latent in latents:
            rendered = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(rendered)

        images = torch.stack(images)

        if output_type not in ['np', 'pil']:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')

        images = images.cpu().numpy()

        if output_type == 'pil':
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
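# Output-handling note (hedged): with output_type='pil', `images` above is a list
# of lists of PIL frames (numpy_to_pil returns one list per batch element), which
# is why the example docstring passes images[0] to diffusers.utils.export_to_gif.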
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class Blip2VisionConfig(PretrainedConfig):
    """Configuration class to store the configuration of the BLIP-2 vision encoder."""

    model_type = 'blip_2_vision_model'

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == 'blip-2':
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
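    # Usage sketch (repo id taken from the archive map above): because of the
    # "blip-2" branch in from_pretrained, the nested section of a composite
    # checkpoint can be loaded directly, e.g.
    #     vision_config = Blip2VisionConfig.from_pretrained('salesforce/blip2-opt-2.7b')
    # keeps only the checkpoint's 'vision_config' sub-dict.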
class Blip2QFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of the BLIP-2 Q-Former."""

    model_type = 'blip_2_qformer'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == 'blip-2':
            config_dict = config_dict['qformer_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    """Composite configuration for a BLIP-2 model: a vision encoder, a Q-Former, and a language model."""

    model_type = 'blip-2'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
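# A minimal composition sketch (all values illustrative): a Blip2Config can be
# assembled from explicitly built sub-configs via the classmethod above.
#
#     from transformers import OPTConfig
#     vision = Blip2VisionConfig()
#     qformer = Blip2QFormerConfig()
#     config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, OPTConfig())
#     assert config.qformer_config.encoder_hidden_size == vision.hidden_size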
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|