| code (string, lengths 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, lengths 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as its 1-indexed alphabet positions."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode 1-indexed alphabet positions back into a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Read a string from stdin, then print its encoded and decoded forms."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
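# A minimal usage sketch (hypothetical interactive session):
#   -> Hello
#   Encoded:  [8, 5, 12, 12, 15]
#   Decoded: hello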
| 60 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
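# A hedged usage sketch (this mirrors an AltCLIP-style processor; the class name and
# checkpoint below are assumptions, not taken from this file):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> a BatchEncoding holding input_ids, attention_mask and pixel_values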
| 683 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
UpperCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i
class __lowerCamelCase :
"""simple docstring"""
def __call__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
return {"input_ids": torch.tensor(SCREAMING_SNAKE_CASE__ ), "labels": torch.tensor(SCREAMING_SNAKE_CASE__ )}
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
    def __init__(self):
        super().__init__()
        # Add some (unused) params, otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
@require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # A successful return here means success; any error would have raised in the sub-call.
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
@require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # A successful return here means success; any error would have raised in the sub-call.
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction, for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Re-run the same checks with eval accumulation enabled (the attribute toggled
        # here is inferred from the upstream Trainer test).
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 61 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # drop a box if its height or width is below this scale.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
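# Annotation format parsed above (YOLO-style, values normalised to [0, 1]):
#   "<class_id> <x_center> <y_center> <width> <height>", e.g. "0 0.5 0.5 0.25 0.4";
# each line is converted to corner coordinates [class_id, xmin, ymin, xmax, ymax].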
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = '''Salesforce/blip-image-captioning-base'''
UpperCamelCase_ : List[str] = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
UpperCamelCase_ : str = '''image_captioner'''
UpperCamelCase_ : Any = AutoModelForVisionaSeq
UpperCamelCase_ : List[Any] = ['''image''']
UpperCamelCase_ : Optional[int] = ['''text''']
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(self , ["vision"] )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : List[Any] , UpperCAmelCase_ : "Image" ):
return self.pre_processor(images=UpperCAmelCase_ , return_tensors="pt" )
def _A ( self : List[str] , UpperCAmelCase_ : int ):
return self.model.generate(**UpperCAmelCase_ )
def _A ( self : Optional[int] , UpperCAmelCase_ : Dict ):
return self.pre_processor.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )[0].strip()
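# A hedged usage sketch (names are illustrative; PipelineTool subclasses are callable,
# chaining encode -> forward -> decode):
#   captioner = ImageCaptionTool()            # i.e. the class defined above
#   caption = captioner(image)                # PIL.Image in, English caption string out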
| 62 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 0 |
a : str = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a : Any = [{"type": "code", "content": INSTALL_CONTENT}]
a : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 63 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Any = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
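# Note: _LazyModule defers the submodule imports declared in _import_structure until an
# attribute (e.g. Blip2Processor) is first accessed, keeping the package import cheap.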
| 64 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
_UpperCamelCase : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
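# Example invocation (the script filename and paths are illustrative):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump --push_to_hub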
| 683 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__UpperCAmelCase = get_logger(__name__)
__UpperCAmelCase = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class __lowercase :
@add_start_docstrings(A )
def __call__( self : int ,A : jnp.ndarray ,A : jnp.ndarray ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class __lowercase :
@add_start_docstrings(A )
def __call__( self : Tuple ,A : jnp.ndarray ,A : jnp.ndarray ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class __lowercase ( __lowerCamelCase ):
@add_start_docstrings(A )
def __call__( self : List[str] ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ,**A : Any ):
'''simple docstring'''
for processor in self:
UpperCAmelCase__ : int = inspect.signature(processor.__call__ ).parameters
if len(A ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys() )} for "
f"{processor.__class__} are passed to the logits processor." )
UpperCAmelCase__ : str = processor(A ,A ,A ,**A )
else:
UpperCAmelCase__ : Optional[int] = processor(A ,A ,A )
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : Dict ,A : float ):
'''simple docstring'''
if not isinstance(A ,A ) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" )
UpperCAmelCase__ : int = temperature
def __call__( self : Union[str, Any] ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = scores / self.temperature
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : int ,A : float ,A : float = -float("""Inf""" ) ,A : int = 1 ):
'''simple docstring'''
if not isinstance(A ,A ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(A ,A ) or (min_tokens_to_keep < 1):
raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
UpperCAmelCase__ : Dict = top_p
UpperCAmelCase__ : Union[str, Any] = filter_value
UpperCAmelCase__ : List[Any] = min_tokens_to_keep
def __call__( self : List[str] ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = lax.top_k(A ,scores.shape[-1] )
UpperCAmelCase__ : Any = jnp.full_like(A ,self.filter_value )
UpperCAmelCase__ : Tuple = jax.nn.softmax(A ,axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase__ : Optional[int] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase__ : Tuple = jnp.roll(A ,1 )
score_mask |= score_mask.at[:, 0].set(A )
# min tokens to keep
UpperCAmelCase__ : Optional[int] = score_mask.at[:, : self.min_tokens_to_keep].set(A )
UpperCAmelCase__ : Dict = jnp.where(A ,A ,A )
UpperCAmelCase__ : Any = jax.lax.sort_key_val(A ,A )[-1]
return next_scores
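# Worked example (illustrative numbers): for sorted probabilities [0.5, 0.3, 0.15, 0.05]
# and top_p=0.9, the cumulative sums are [0.5, 0.8, 0.95, 1.0]; the (cumsum < 0.9) mask
# rolled right by one, with the first position forced on, keeps the 0.5/0.3/0.15 tokens
# and replaces the 0.05 token's score with filter_value.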
class __lowercase ( __lowerCamelCase ):
def __init__( self : Any ,A : int ,A : float = -float("""Inf""" ) ,A : int = 1 ):
'''simple docstring'''
if not isinstance(A ,A ) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" )
UpperCAmelCase__ : Dict = max(A ,A )
UpperCAmelCase__ : str = filter_value
def __call__( self : int ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = scores.shape
UpperCAmelCase__ : Optional[int] = jnp.full(batch_size * vocab_size ,self.filter_value )
UpperCAmelCase__ : Any = min(self.top_k ,scores.shape[-1] ) # Safety check
UpperCAmelCase__ , UpperCAmelCase__ : Any = lax.top_k(A ,A )
UpperCAmelCase__ : Union[str, Any] = jnp.broadcast_to((jnp.arange(A ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
UpperCAmelCase__ : Union[str, Any] = topk_scores.flatten()
UpperCAmelCase__ : Dict = topk_indices.flatten() + shift
UpperCAmelCase__ : Any = next_scores_flat.at[topk_indices_flat].set(A )
UpperCAmelCase__ : List[str] = next_scores_flat.reshape(A ,A )
return next_scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : List[Any] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = bos_token_id
def __call__( self : int ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = jnp.full(scores.shape ,-float("""inf""" ) )
UpperCAmelCase__ : List[Any] = 1 - jnp.bool_(cur_len - 1 )
UpperCAmelCase__ : Any = jnp.where(A ,new_scores.at[:, self.bos_token_id].set(0 ) ,A )
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : Optional[int] ,A : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = max_length
UpperCAmelCase__ : Dict = eos_token_id
def __call__( self : Union[str, Any] ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = jnp.full(scores.shape ,-float("""inf""" ) )
UpperCAmelCase__ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCAmelCase__ : Any = jnp.where(A ,new_scores.at[:, self.eos_token_id].set(0 ) ,A )
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : int ,A : int ,A : int ):
'''simple docstring'''
if not isinstance(A ,A ) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(A ,A ) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
UpperCAmelCase__ : str = min_length
UpperCAmelCase__ : Dict = eos_token_id
def __call__( self : str ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
# create boolean flag to decide if min length penalty should be applied
UpperCAmelCase__ : List[Any] = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
UpperCAmelCase__ : str = jnp.where(A ,scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) ,A )
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : int ,A : List[str] ,A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = list(A )
UpperCAmelCase__ : Optional[int] = begin_index
def __call__( self : Dict ,A : int ,A : List[Any] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCAmelCase__ : Tuple = jnp.where(A ,scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) ,A )
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : List[str] ,A : list ):
'''simple docstring'''
UpperCAmelCase__ : int = list(A )
def __call__( self : int ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : Union[str, Any] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = dict(A )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCAmelCase__ : str = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCAmelCase__ : str = force_token_array.at[index].set(A )
UpperCAmelCase__ : Union[str, Any] = jnp.intaa(A )
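        # Example (illustrative): force_token_map = {1: 50362, 3: 50257} becomes the array
        # [-1, 50362, -1, 50257]; index 2 holds -1, i.e. no token is forced at that step.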
def __call__( self : List[str] ,A : jnp.ndarray ,A : jnp.ndarray ,A : int ):
'''simple docstring'''
def _force_token(A : Any ):
UpperCAmelCase__ : int = scores.shape[0]
UpperCAmelCase__ : Union[str, Any] = self.force_token_array[generation_idx]
UpperCAmelCase__ : str = jnp.ones_like(A ,dtype=scores.dtype ) * -float("""inf""" )
UpperCAmelCase__ : Any = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
UpperCAmelCase__ : Tuple = lax.dynamic_update_slice(A ,A ,(0, current_token) )
return new_scores
UpperCAmelCase__ : Optional[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(A ) ,lambda: scores ,) ,)
return scores
class __lowercase ( __lowerCamelCase ):
def __init__( self : Optional[int] ,A : Optional[Any] ,A : List[str] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = generate_config.eos_token_id
UpperCAmelCase__ : Dict = generate_config.no_timestamps_token_id
UpperCAmelCase__ : int = generate_config.no_timestamps_token_id + 1
UpperCAmelCase__ : Any = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A ,"""max_initial_timestamp_index""" ):
UpperCAmelCase__ : int = generate_config.max_initial_timestamp_index
else:
UpperCAmelCase__ : Optional[Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCAmelCase__ : int = model_config.vocab_size
def __call__( self : int ,A : str ,A : Dict ,A : List[str] ):
'''simple docstring'''
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCAmelCase__ : Union[str, Any] = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(A : str ,A : Optional[Any] ):
UpperCAmelCase__ : str = jnp.where((cur_len - self.begin_index) >= 1 ,A ,A )
UpperCAmelCase__ : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,A ,)
UpperCAmelCase__ : Tuple = jnp.where((cur_len - self.begin_index) < 2 ,A ,A )
UpperCAmelCase__ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,A ,A ,)
return jnp.where(
A ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) ,scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) ,) ,A ,)
UpperCAmelCase__ : Any = jax.vmap(A )(A ,A )
UpperCAmelCase__ : Optional[Any] = jnp.where(cur_len == self.begin_index ,A ,A )
UpperCAmelCase__ : Tuple = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,A ,)
UpperCAmelCase__ : Dict = self.timestamp_begin + self.max_initial_timestamp_index
UpperCAmelCase__ : Optional[int] = jnp.where(
A ,scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) ,A ,)
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCAmelCase__ : Optional[Any] = jax.nn.log_softmax(A ,axis=-1 )
def handle_cumulative_probs(A : Optional[int] ,A : Optional[int] ):
UpperCAmelCase__ : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
UpperCAmelCase__ : List[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) ,A ,)
UpperCAmelCase__ : Dict = jax.vmap(A )(A ,A )
return scores
| 65 |
"""
Count how many starting numbers below ten million produce a squared-digit chain
that arrives at 89 (every chain ends in either the 1-cycle or the 89-cycle).
"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Speed is increased slightly by checking five digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are two chains: one ends at 89, and seeding its member 58 first minimises the
# number of iterations needed to resolve all the other members; the other ends at 1
# and has the single element 1. So 58 and 1 are marked before anything else is checked.
# A dictionary was changed to an array to speed up the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True    # the chain starting at 1
CHAINS[57] = False  # the chain starting at 58, which ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Multiples of ten share the same chain result, so fill them in as well.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Union[str, Any] = "speech_to_text"
_UpperCamelCase : Optional[int] = ["past_key_values"]
_UpperCamelCase : List[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _lowerCAmelCase=1_0_0_0_0 , _lowerCAmelCase=1_2 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4 , _lowerCAmelCase=6 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=2 , _lowerCAmelCase=True , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=6_0_0_0 , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=2 , _lowerCAmelCase=(5, 5) , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=8_0 , _lowerCAmelCase=1 , **_lowerCAmelCase , ):
_lowercase : Tuple = vocab_size
_lowercase : int = d_model
_lowercase : Tuple = encoder_ffn_dim
_lowercase : Union[str, Any] = encoder_layers
_lowercase : Dict = encoder_attention_heads
_lowercase : int = decoder_ffn_dim
_lowercase : Any = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : Optional[int] = attention_dropout
_lowercase : str = activation_dropout
_lowercase : int = activation_function
_lowercase : Dict = init_std
_lowercase : Any = encoder_layerdrop
_lowercase : Optional[int] = decoder_layerdrop
_lowercase : Tuple = use_cache
_lowercase : int = encoder_layers
_lowercase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase : Any = max_source_positions
_lowercase : Any = max_target_positions
_lowercase : Any = num_conv_layers
_lowercase : Union[str, Any] = list(_lowerCAmelCase )
_lowercase : List[Any] = conv_channels
_lowercase : int = input_feat_per_channel
_lowercase : Union[str, Any] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
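        # Example (assuming the upstream Speech2Text defaults): conv_kernel_sizes=(5, 5)
        # with num_conv_layers=2 passes the length check above, so no ValueError is raised.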
| 66 |
'''simple docstring'''
_UpperCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> str:
assert len(str(UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_UpperCamelCase : Any = year // 1_00
_UpperCamelCase : List[Any] = (5 * (century % 4) + 2) % 7
_UpperCamelCase : Tuple = year % 1_00
_UpperCamelCase : Optional[int] = centurian % 12
_UpperCamelCase : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCamelCase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
    if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCamelCase : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
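# Worked example for the function above with (year, month, day) = (2023, 1, 1):
#   century = 20 -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 23, centurian_m = 23 % 12 = 11
#   dooms_day = (23 // 12 + 11 + 11 // 4 + 2) % 7 = 16 % 7 = 2
#   2023 is not a leap year -> day_anchor = DOOMSDAY_NOT_LEAP[0] = 3
#   week_day = (2 + 1 - 3) % 7 = 0 -> "Sunday" (2023-01-01 was indeed a Sunday)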
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = '''maskformer'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''hidden_size''': '''mask_feature_size'''}
SCREAMING_SNAKE_CASE_ : List[Any] = ['''resnet''', '''swin''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''detr''']
def __init__( self : Any ,__A : int = 256 ,__A : int = 256 ,__A : float = 0.1 ,__A : bool = False ,__A : Optional[Dict] = None ,__A : Optional[Dict] = None ,__A : float = 0.02 ,__A : float = 1.0 ,__A : float = 1.0 ,__A : float = 1.0 ,__A : float = 20.0 ,__A : Optional[bool] = None ,**__A : Tuple ,) -> Dict:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowercase = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(__A ,__A ):
_lowercase = backbone_config.pop('model_type' )
_lowercase = CONFIG_MAPPING[backbone_model_type]
_lowercase = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowercase = DetrConfig()
else:
# verify that the decoder is supported
_lowercase = (
decoder_config.pop('model_type' ) if isinstance(__A ,__A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported )}""" )
if isinstance(__A ,__A ):
_lowercase = CONFIG_MAPPING[decoder_type]
_lowercase = config_class.from_dict(__A )
_lowercase = backbone_config
_lowercase = decoder_config
# main feature dimension for the model
_lowercase = fpn_feature_size
_lowercase = mask_feature_size
# initializer
_lowercase = init_std
_lowercase = init_xavier_std
# Hungarian matcher && loss
_lowercase = cross_entropy_weight
_lowercase = dice_weight
_lowercase = mask_weight
_lowercase = use_auxiliary_loss
_lowercase = no_object_weight
_lowercase = output_auxiliary_logits
_lowercase = self.decoder_config.encoder_attention_heads
_lowercase = self.decoder_config.num_hidden_layers
super().__init__(**__A )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] ,__A : PretrainedConfig ,__A : PretrainedConfig ,**__A : Union[str, Any] ) -> Optional[Any]:
return cls(
backbone_config=__A ,decoder_config=__A ,**__A ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict[str, any]:
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.backbone_config.to_dict()
_lowercase = self.decoder_config.to_dict()
_lowercase = self.__class__.model_type
return output | 67 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
| 683 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__A = logging.get_logger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =[label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if len(__SCREAMING_SNAKE_CASE ) == 0 or len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(__SCREAMING_SNAKE_CASE ) )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =[sequences]
__UpperCAmelCase =[]
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__SCREAMING_SNAKE_CASE )] for label in labels] )
return sequence_pairs, sequences
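# Example (illustrative): labels "politics, economy" with the default template
# "This example is {}." expand one sequence into the premise/hypothesis pairs
#   [sequence, "This example is politics."] and [sequence, "This example is economy."].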
@add_end_docstrings(UpperCamelCase )
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=ZeroShotClassificationArgumentHandler() , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Any ) -> List[str]:
__UpperCAmelCase =args_parser
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a ( self : Any ) -> Optional[int]:
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=TruncationStrategy.ONLY_FIRST , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
__UpperCAmelCase =self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
__UpperCAmelCase =self.tokenizer.eos_token
try:
__UpperCAmelCase =self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )
except Exception as e:
if "too short" in str(__SCREAMING_SNAKE_CASE ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__UpperCAmelCase =self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
if kwargs.get("""multi_class""" , __SCREAMING_SNAKE_CASE ) is not None:
__UpperCAmelCase =kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
__UpperCAmelCase ={}
if "candidate_labels" in kwargs:
__UpperCAmelCase =self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
__UpperCAmelCase =kwargs["""hypothesis_template"""]
__UpperCAmelCase ={}
if "multi_label" in kwargs:
__UpperCAmelCase =kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
if len(__SCREAMING_SNAKE_CASE ) == 0:
pass
elif len(__SCREAMING_SNAKE_CASE ) == 1 and "candidate_labels" not in kwargs:
__UpperCAmelCase =args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]="This example is {}." ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase =self._args_parser(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for i, (candidate_label, sequence_pair) in enumerate(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase =self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__SCREAMING_SNAKE_CASE ) - 1,
**model_input,
}
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> str:
__UpperCAmelCase =inputs["""candidate_label"""]
__UpperCAmelCase =inputs["""sequence"""]
__UpperCAmelCase ={k: inputs[k] for k in self.tokenizer.model_input_names}
__UpperCAmelCase =self.model(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any=False ) -> List[Any]:
__UpperCAmelCase =[outputs["""candidate_label"""] for outputs in model_outputs]
__UpperCAmelCase =[outputs["""sequence"""] for outputs in model_outputs]
__UpperCAmelCase =np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
__UpperCAmelCase =logits.shape[0]
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =N // n
__UpperCAmelCase =logits.reshape((num_sequences, n, -1) )
if multi_label or len(__SCREAMING_SNAKE_CASE ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__UpperCAmelCase =self.entailment_id
__UpperCAmelCase =-1 if entailment_id == 0 else 0
__UpperCAmelCase =reshaped_outputs[..., [contradiction_id, entailment_id]]
__UpperCAmelCase =np.exp(__SCREAMING_SNAKE_CASE ) / np.exp(__SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__UpperCAmelCase =reshaped_outputs[..., self.entailment_id]
__UpperCAmelCase =np.exp(__SCREAMING_SNAKE_CASE ) / np.exp(__SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
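
# --- Illustrative usage sketch (added; not part of the original module) -------------
# Driving a zero-shot classification pipeline such as the one above through the
# public `transformers` factory. The model id is an assumption; any NLI checkpoint
# whose config maps an "entailment" label works.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "one day I will see the world",
        candidate_labels=["travel", "cooking", "dancing"],
        hypothesis_template="This example is {}.",  # the default template used above
        multi_label=False,
    )
    print(result["labels"][0], round(result["scores"][0], 3))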
| 68 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
'''simple docstring'''
def decimal_to_binary( num : int ) -> str:
    """Convert an integer to its binary string representation, e.g. 40 -> '0b101000'."""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
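    # Illustrative checks (added): a few hand-verified conversions exercising the
    # fixed function above (40 = 0b101000).
    assert decimal_to_binary(0) == "0b0"
    assert decimal_to_binary(40) == "0b101000"
    assert decimal_to_binary(-40) == "-0b101000"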
| 69 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
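# --- Illustrative invocation (added; not part of the original script) ---------------
# All paths below are placeholders, and the script filename is an assumption:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention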
| 683 | 0 |
def binary_multiply( a : int , b : int ) -> int:
    """Multiply two non-negative integers with the Russian-peasant (shift-and-add) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply( a : int , b : int , c : int ) -> int:
    """Compute (a * b) % c, reducing each partial sum modulo c along the way."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
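
if __name__ == "__main__":
    # Illustrative checks (added): 12 * 34 = 408 and 408 mod 7 = 2; these exercise
    # the two fixed helpers above.
    assert binary_multiply(12, 34) == 408
    assert binary_mod_multiply(12, 34, 7) == 2
    print("russian peasant multiplication: checks passed")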
| 70 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def partition ( number_to_partition : int ) -> set[int]:
    """Return the set of products encoding every partition of the input into primes.

    Each partition maps to the product of its primes, which is unique by the
    fundamental theorem of arithmetic, so the set's size is the partition count.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret : set[int] = set()
    prime : int
    sub : int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution ( number_unique_partitions : int = 50_00 ) -> int | None:
    """Return the first value writable as a sum of primes in more than
    ``number_unique_partitions`` ways, or None if none exists below NUM_PRIMES."""
    # partition() is only exact for values below the sieve bound NUM_PRIMES
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
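
# --- Illustrative usage sketch (added; not part of the original tests) --------------
# Outside the test harness the processor is normally loaded from a published
# checkpoint; the model id below is an assumption.
if __name__ == "__main__":
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    batch = processor(text=["a photo of a cat"], return_tensors="pt", padding=True)
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids']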
| 71 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
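# --- Illustrative invocation (added; not part of the original app) ------------------
# Streamlit scripts are launched from the CLI rather than with `python`. Assuming
# this file is saved as eli5_app.py and the memmapped index files referenced above
# exist on disk:
#
#   streamlit run eli5_app.py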
| 683 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'umt5'
UpperCamelCase__ = ['past_key_values']
def __init__( self , snake_case_=25_01_12 , snake_case_=5_12 , snake_case_=64 , snake_case_=10_24 , snake_case_=8 , snake_case_=None , snake_case_=6 , snake_case_=32 , snake_case_=1_28 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=1.0 , snake_case_="gated-gelu" , snake_case_=True , snake_case_=True , snake_case_="T5Tokenizer" , snake_case_=True , snake_case_=0 , snake_case_=1 , snake_case_=0 , **snake_case_ , ):
super().__init__(
is_encoder_decoder=snake_case_ , tokenizer_class=snake_case_ , tie_word_embeddings=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , **snake_case_ , )
lowercase =vocab_size
lowercase =d_model
lowercase =d_kv
lowercase =d_ff
lowercase =num_layers
lowercase =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase =num_heads
lowercase =relative_attention_num_buckets
lowercase =relative_attention_max_distance
lowercase =dropout_rate
lowercase =layer_norm_epsilon
lowercase =initializer_factor
lowercase =feed_forward_proj
lowercase =use_cache
lowercase =self.feed_forward_proj.split('''-''' )
lowercase =act_info[-1]
lowercase =act_info[0] == '''gated'''
if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
lowercase ='''gelu_new'''
@property
def _A( self ):
return self.d_model
@property
def _A( self ):
return self.num_heads
@property
def _A( self ):
return self.num_layers
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _A( self ):
lowercase ={
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
lowercase ='''past_encoder_sequence + sequence'''
lowercase ={0: '''batch'''}
lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _A( self ):
return 13
@property
def _A( self ):
return 5E-4
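
# --- Illustrative usage sketch (added; not part of the original module) -------------
# Building a toy configuration through the public class; the sizes below are
# illustration values, not the google/umt5-small hyperparameters.
if __name__ == "__main__":
    from transformers import UMT5Config

    cfg = UMT5Config(d_model=64, num_layers=2, num_heads=4, d_ff=128)
    print(cfg.model_type, cfg.d_model, cfg.num_heads)  # umt5 64 4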
| 72 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
_UpperCamelCase : Node | None = None # Added in order to delete a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
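
# --- Illustrative sketch (added; not part of the original module) -------------------
# The kth-smallest query above works because an in-order walk of a BST yields values
# in sorted order. A standalone stand-in that keeps a sorted list instead of a tree:
if __name__ == "__main__":
    import bisect

    sorted_values: list[int] = []
    for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
        bisect.insort(sorted_values, v)  # plays the role of insert + in-order walk
    print("3rd smallest:", sorted_values[2])  # -> 4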
| 683 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[int] = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
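
# --- Illustrative usage sketch (added; not part of the original module) -------------
# The `_LazyModule` indirection above defers the heavy imports until first attribute
# access; this can be observed from an installed `transformers` package (assumed
# available):
if __name__ == "__main__":
    import transformers

    cfg = transformers.EfficientNetConfig()  # first access triggers the lazy import
    print(type(cfg).__name__, cfg.model_type)  # EfficientNetConfig efficientnet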
| 73 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'whisper'
A__ : Tuple = ['past_key_values']
A__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _snake_case=51865 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=1536 , _snake_case=1536 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=50257 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=256 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=1500 , _snake_case=448 , _snake_case=50256 , _snake_case=50256 , _snake_case=50256 , _snake_case=None , _snake_case=[220, 50256] , _snake_case=False , _snake_case=256 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ) -> Any:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = num_mel_bins
_UpperCamelCase : List[str] = d_model
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Optional[int] = encoder_attention_heads
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[int] = encoder_ffn_dim
_UpperCamelCase : Any = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : List[Any] = init_std
_UpperCamelCase : Optional[int] = encoder_layerdrop
_UpperCamelCase : str = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : List[str] = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : int = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : int = mask_time_length
_UpperCamelCase : List[Any] = mask_time_min_masks
_UpperCamelCase : List[str] = mask_feature_prob
_UpperCamelCase : Optional[int] = mask_feature_length
_UpperCamelCase : Union[str, Any] = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Dict = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase : Tuple = {0: '''batch'''}
else:
_UpperCamelCase : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
return common_inputs
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 22050 , _snake_case = 5.0 , _snake_case = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = OrderedDict()
_UpperCamelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
_UpperCamelCase : int = encoder_inputs['''input_features'''].shape[2]
_UpperCamelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : str = super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = encoder_inputs.pop('''input_features''' )
_UpperCamelCase : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : List[str] = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowercase ( self ) -> float:
return 1E-3
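
# --- Illustrative usage sketch (added; not part of the original module) -------------
# Constructing a toy configuration directly; the sizes are illustration values, far
# smaller than any released Whisper checkpoint.
if __name__ == "__main__":
    from transformers import WhisperConfig

    cfg = WhisperConfig(d_model=64, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2)
    print(cfg.model_type, cfg.num_mel_bins)  # whisper 80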
| 683 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger("""transformers.models.speecht5""")
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
hf_model.apply_weight_norm()
__SCREAMING_SNAKE_CASE : Tuple = checkpoint['''input_conv.weight_g''']
__SCREAMING_SNAKE_CASE : Any = checkpoint['''input_conv.weight_v''']
__SCREAMING_SNAKE_CASE : Optional[int] = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
__SCREAMING_SNAKE_CASE : Any = checkpoint[F'''upsamples.{i}.1.weight_g''']
__SCREAMING_SNAKE_CASE : List[Any] = checkpoint[F'''upsamples.{i}.1.weight_v''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
__SCREAMING_SNAKE_CASE : str = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
__SCREAMING_SNAKE_CASE : int = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
__SCREAMING_SNAKE_CASE : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
__SCREAMING_SNAKE_CASE : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
__SCREAMING_SNAKE_CASE : List[Any] = checkpoint['''output_conv.1.weight_g''']
__SCREAMING_SNAKE_CASE : Tuple = checkpoint['''output_conv.1.weight_v''']
__SCREAMING_SNAKE_CASE : List[Any] = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def a__ ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , ):
"""simple docstring"""
if config_path is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(snake_case )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechTaHifiGanConfig()
__SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(snake_case )
__SCREAMING_SNAKE_CASE : str = torch.load(snake_case )
load_weights(orig_checkpoint['''model''']['''generator'''] , snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Any = np.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = stats[1].reshape(-1 )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(snake_case ).float()
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(snake_case ).float()
model.save_pretrained(snake_case )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(snake_case )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
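# Hedged sketch of the weight-norm round-trip the converter above relies on:
# apply_weight_norm() exposes weight_g/weight_v so checkpoint tensors can be
# copied in, and remove_weight_norm() folds them back into a single weight.
# The Conv1d shape is an arbitrary assumption for illustration.
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(1, 1, 3))
conv.weight_g.data.fill_(1.0)  # stands in for copying a "...weight_g" tensor
conv.weight_v.data.normal_()   # stands in for copying a "...weight_v" tensor
remove_weight_norm(conv)       # folds g and v back into conv.weight
print(conv.weight.shape)       # torch.Size([1, 1, 3])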
| 74 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
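# Hedged toy version of the layer-selection loop above: remap a subset of
# teacher layers onto consecutive student indices (tensor values are stand-ins).
teacher_sd = {f"encoder.layer.{i}.weight": i for i in range(12)}
student_sd = {}
for std_i, teacher_i in enumerate([0, 2, 4, 7, 9, 11]):
    student_sd[f"encoder.layer.{std_i}.weight"] = teacher_sd[f"encoder.layer.{teacher_i}.weight"]
print(len(student_sd))  # 6 layers transferred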
| 683 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'trajectory_transformer'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[str] , _A : Any=100 , _A : int=5 , _A : Any=1 , _A : int=1 , _A : Dict=249 , _A : Dict=6 , _A : List[str]=17 , _A : int=25 , _A : Tuple=4 , _A : int=4 , _A : Any=128 , _A : Tuple=0.1 , _A : Optional[int]=0.1 , _A : Any=0.1 , _A : List[str]=0.0_0_0_6 , _A : Union[str, Any]=512 , _A : Tuple=0.0_2 , _A : str=1e-12 , _A : List[str]=1 , _A : List[Any]=True , _A : List[Any]=1 , _A : Union[str, Any]=50_256 , _A : int=50_256 , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : List[str] = action_weight
UpperCAmelCase__ : Optional[int] = reward_weight
UpperCAmelCase__ : str = value_weight
UpperCAmelCase__ : Dict = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = block_size
UpperCAmelCase__ : Dict = action_dim
UpperCAmelCase__ : Optional[int] = observation_dim
UpperCAmelCase__ : Dict = transition_dim
UpperCAmelCase__ : Dict = learning_rate
UpperCAmelCase__ : List[str] = n_layer
UpperCAmelCase__ : Optional[int] = n_head
UpperCAmelCase__ : str = n_embd
UpperCAmelCase__ : int = embd_pdrop
UpperCAmelCase__ : List[Any] = attn_pdrop
UpperCAmelCase__ : Any = resid_pdrop
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : Optional[int] = layer_norm_eps
UpperCAmelCase__ : List[str] = kaiming_initializer_range
UpperCAmelCase__ : Union[str, Any] = use_cache
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
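# Hedged stand-alone illustration of the attribute_map used above: generic code
# can read `hidden_size` while the config stores `n_embd`. The class below is a
# toy mimicking PretrainedConfig's aliasing, not the real implementation.
class _AttributeMapDemo:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self):
        self.n_embd = 128

    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

print(_AttributeMapDemo().hidden_size)  # 128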
| 75 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
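# Hedged numpy sketch of the temperature warping exercised above: dividing
# logits by a temperature below 1 sharpens the softmax, above 1 flattens it.
def _softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

_logits = np.array([2.0, 1.0, 0.5])
print(_softmax(_logits / 0.5))  # sharper than _softmax(_logits)
print(_softmax(_logits / 1.3))  # flatter than _softmax(_logits)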
| 683 | 0 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=10_00 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__lowercase : Any = n - 1
__lowercase : Optional[int] = 0
while d % 2 == 0:
d //= 2  # floor-divide so d stays an integer for bin_exp_mod
exp += 1
# n - 1 = d * (2**exp)
__lowercase : List[str] = 0
while count < prec:
__lowercase : int = random.randint(2 , n - 1 )
__lowercase : Dict = bin_exp_mod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if b != 1:
__lowercase : Tuple = True
for _ in range(__UpperCamelCase ):
if b == n - 1:
__lowercase : Tuple = False
break
__lowercase : int = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
a_ = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(a_ + 1) if is_prime_big(i)))
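# Hedged self-check of the n - 1 = d * 2**exp decomposition computed inside
# is_prime_big, on a fixed odd value.
m = 97
d_check, exp_check = m - 1, 0
while d_check % 2 == 0:
    d_check //= 2
    exp_check += 1
assert d_check * 2**exp_check == m - 1 and d_check % 2 == 1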
| 76 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
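# Hedged minimal example of the pytest.mark.parametrize pattern used throughout
# this file; the lookup table is a local stand-in for the hub, so this test
# runs without network access (reuses the pytest import above).
_LOCAL_CONFIGS = {"squad": ["plain_text"], "lhoestq/test": ["default"]}

@pytest.mark.parametrize("path_arg, expected_arg", [("squad", "plain_text"), ("lhoestq/test", "default")])
def test_expected_config_local(path_arg, expected_arg):
    assert expected_arg in _LOCAL_CONFIGS[path_arg]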
| 683 | 0 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = WavaVecaPhonemeCTCTokenizer
lowercase_ = False
def a_ ( self : Optional[Any]):
"""simple docstring"""
super().setUp()
__UpperCAmelCase : str = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" ")
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_))))
__UpperCAmelCase : Any = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCamelCase_) + "\n")
def a_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Any=20 , UpperCamelCase_ : Optional[int]=5):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_)) for i in range(len(UpperCamelCase_))]
__UpperCAmelCase : List[str] = list(filter(lambda UpperCamelCase_: [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase_) , UpperCamelCase_))
if max_length is not None and len(UpperCamelCase_) > max_length:
__UpperCAmelCase : Optional[int] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_) < min_length and len(UpperCamelCase_) > 0:
while len(UpperCamelCase_) < min_length:
__UpperCAmelCase : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__UpperCAmelCase : Tuple = [t[0] for t in toks]
# Ensure consistency
__UpperCAmelCase : Union[str, Any] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_)
if " " not in output_txt and len(UpperCamelCase_) > 1:
__UpperCAmelCase : Optional[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_)
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_)
)
if with_prefix_space:
__UpperCAmelCase : str = " " + output_txt
__UpperCAmelCase : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_)
return output_txt, output_ids
def a_ ( self : Optional[int] , **UpperCamelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# check adding a single token
tokenizer.add_tokens("xxx")
__UpperCAmelCase : Dict = tokenizer("m xxx ɪ" , do_phonemize=UpperCamelCase_).input_ids
self.assertEqual(UpperCamelCase_ , [13, 392, 17]) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"])
__UpperCAmelCase : List[Any] = tokenizer("m aaa ɪ ccc" , do_phonemize=UpperCamelCase_).input_ids
self.assertEqual(UpperCamelCase_ , [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa
__UpperCAmelCase : int = tokenizer("maɪ c" , do_phonemize=UpperCamelCase_).input_ids
self.assertEqual(UpperCamelCase_ , [3, 200]) # mai should be <unk> (=3)
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
__UpperCAmelCase : int = "Hello how are you"
__UpperCAmelCase : Optional[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
self.assertEqual(UpperCamelCase_ , "h ə l oʊ h aʊ ɑːɹ j uː")
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
__UpperCAmelCase : int = "Hello how are you"
__UpperCAmelCase : Optional[int] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
self.assertEqual(tokenizer(UpperCamelCase_).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_).input_ids)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
__UpperCAmelCase : Dict = "Hello how are you"
__UpperCAmelCase : List[str] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
__UpperCAmelCase : List[Any] = tokenizer.decode(tokenizer(UpperCamelCase_).input_ids)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
__UpperCAmelCase : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__UpperCAmelCase : Any = tokenizer.decode(sample_ids[0])
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , batch_tokens[0])
self.assertEqual(UpperCamelCase_ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
__UpperCAmelCase : Optional[Any] = "Hello how are you"
__UpperCAmelCase : Dict = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
self.assertEqual(UpperCamelCase_ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
__UpperCAmelCase : int = "Hello how are you"
__UpperCAmelCase : List[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
self.assertEqual(tokenizer(UpperCamelCase_).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_).input_ids)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
__UpperCAmelCase : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__UpperCAmelCase : List[str] = tokenizer.decode(sample_ids[0])
__UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , batch_tokens[0])
self.assertEqual(UpperCamelCase_ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
# decode with no word_del_token filter
__UpperCAmelCase : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase_)
__UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , batch_tokens[0])
self.assertEqual(UpperCamelCase_ , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
__UpperCAmelCase : List[str] = "Hello how are you"
__UpperCAmelCase : str = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
__UpperCAmelCase : Dict = tokenizer.decode(tokenizer(UpperCamelCase_).input_ids , filter_word_delimiter_token=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
__UpperCAmelCase : List[str] = "Hello how are you"
__UpperCAmelCase : List[str] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us")
__UpperCAmelCase : Tuple = tokenizer.decode(tokenizer(UpperCamelCase_).input_ids , filter_word_delimiter_token=UpperCamelCase_)
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip() , UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=UpperCamelCase_)
__UpperCAmelCase : Any = "Hello how are you"
__UpperCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , phonemizer_lang="en-us").input_ids
__UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , phonemizer_lang="fr-fr").input_ids
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : List[str] = tokenizer.decode(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer.decode(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , "h ə l oʊ h aʊ ɑːɹ j uː")
self.assertEqual(UpperCamelCase_ , "ɛ l o h aʊ a ʁ j u")
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
__UpperCAmelCase : List[Any] = "Hello how Are you"
__UpperCAmelCase : Tuple = "hello how are you"
__UpperCAmelCase : str = tokenizer(UpperCamelCase_).input_ids
__UpperCAmelCase : Dict = tokenizer(UpperCamelCase_).input_ids
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
tokenizer.add_tokens(["!", "?"])
tokenizer.add_special_tokens({"cls_token": "$$$"})
# fmt: off
__UpperCAmelCase : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
def a_ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = [d[key] for d in offsets]
return retrieved_list
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__UpperCAmelCase : str = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__UpperCAmelCase : Optional[int] = tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) , 2)
self.assertTrue("text" in outputs)
self.assertTrue("char_offsets" in outputs)
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_))
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char")) , outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char") , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset") , [0, 1, 4, 7, 9, 11, 12, 15, 16])
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset") , [1, 4, 6, 9, 10, 12, 15, 16, 17])
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_tokenizer(word_delimiter_token="|")
def check_list_tuples_equal(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]):
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_))
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase_))
# transform list to ModelOutput
__UpperCAmelCase : Optional[Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]})
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"])
def recursive_check(UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any]):
if isinstance(UpperCamelCase_ , UpperCamelCase_):
[recursive_check(UpperCamelCase_ , UpperCamelCase_) for la, la in zip(UpperCamelCase_ , UpperCamelCase_)]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"])
# fmt: off
__UpperCAmelCase : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__UpperCAmelCase : List[Any] = tokenizer.batch_decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_)
__UpperCAmelCase : Any = [tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase_ , UpperCamelCase_)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
def a_ ( self : Any):
"""simple docstring"""
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
def a_ ( self : Tuple):
"""simple docstring"""
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
def a_ ( self : Any):
"""simple docstring"""
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
def a_ ( self : int):
"""simple docstring"""
pass
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=UpperCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__UpperCAmelCase : Union[str, Any] = tokenizer.vocab_size
__UpperCAmelCase : Any = len(UpperCamelCase_)
self.assertNotEqual(UpperCamelCase_ , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__UpperCAmelCase : Tuple = ["aaaaa bbbbbb", "cccccccccdddddddd"]
__UpperCAmelCase : Optional[Any] = tokenizer.add_tokens(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = tokenizer.vocab_size
__UpperCAmelCase : List[str] = len(UpperCamelCase_)
self.assertNotEqual(UpperCamelCase_ , 0)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_))
self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_))
__UpperCAmelCase : List[str] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=UpperCamelCase_)
self.assertGreaterEqual(len(UpperCamelCase_) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
__UpperCAmelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
__UpperCAmelCase : int = tokenizer.add_special_tokens(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer.vocab_size
__UpperCAmelCase : Optional[int] = len(UpperCamelCase_)
self.assertNotEqual(UpperCamelCase_ , 0)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_))
self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_))
__UpperCAmelCase : List[str] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=UpperCamelCase_)
self.assertGreaterEqual(len(UpperCamelCase_) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def a_ ( self : List[str]):
"""simple docstring"""
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def a_ ( self : Tuple):
"""simple docstring"""
pass
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__UpperCAmelCase : Optional[int] = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
__UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_string(UpperCamelCase_)
self.assertIsInstance(output["text"] , UpperCamelCase_)
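# Hedged stand-alone version of the get_from_offsets helper tested above:
# pull a single field out of a list of per-character offset dicts.
_offsets = [
    {"char": "k", "start_offset": 0, "end_offset": 1},
    {"char": "s", "start_offset": 1, "end_offset": 4},
]

def _get_from_offsets(offsets, key):
    return [d[key] for d in offsets]

print(_get_from_offsets(_offsets, "char"))          # ['k', 's']
print(_get_from_offsets(_offsets, "start_offset"))  # [0, 1]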
| 77 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
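# Hedged numpy sketch of the image-slice assertion pattern used in these tests;
# the byte values are copied from an expected_slice above. Casting to int
# before subtracting avoids uint8 wrap-around.
_image_bytes = bytes([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
_image_slice = np.frombuffer(_image_bytes, dtype="uint8")[:10].astype(int)
_expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
assert np.abs(_image_slice - _expected_slice).max() == 0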
| 683 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
SCREAMING_SNAKE_CASE_: Union[str, Any] =['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __A ( UpperCamelCase__ ):
def __init__(self : Optional[Any] , __a : str , __a : Optional[Any] , __a : int=None , __a : str=1 ):
UpperCAmelCase_ = tokenizer
UpperCAmelCase_ = dataset
UpperCAmelCase_ = len(__a ) if n_tasks is None else n_tasks
UpperCAmelCase_ = n_copies
def __iter__(self : List[Any] ):
UpperCAmelCase_ = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
UpperCAmelCase_ = self.tokenizer(__a , padding=__a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated sequences in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last, usually incomplete, block of the generated code."""
    # EOF_STRINGS is the module-level list of end-of-function markers defined earlier in the script
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, distributed across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # HumanEvalArguments (defined earlier in the script) holds the CLI options
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
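The `code_eval` metric used above reports pass@k, estimated per task from n generated samples of which c pass the unit tests. A minimal sketch of the unbiased estimator from the Codex paper (the function name and sample counts here are my own, for illustration only):

from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # Unbiased estimator: 1 - C(n - c, k) / C(n, k), i.e. the probability
    # that at least one of k samples drawn without replacement from the
    # n generations is among the c correct ones.
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

# Example: 200 samples per task, 13 of them pass the tests.
print(f"pass@1  = {pass_at_k(200, 13, 1):.4f}")   # 0.0650
print(f"pass@10 = {pass_at_k(200, 13, 10):.4f}")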
| 78 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
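The module above defers heavy imports until an attribute is actually accessed, via transformers' `_LazyModule`. A minimal sketch of the same pattern using only the standard library (class and module names here are illustrative, not transformers' actual implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map attribute -> module that defines it.
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._attr_to_module[attr])
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is hit only once per attribute
        return value

# Usage sketch: json.dumps is imported only when first touched.
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"a": 1}))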
| 683 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = get_activation("""swish""" )
self.assertIsInstance(_lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = get_activation("""silu""" )
self.assertIsInstance(_lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = get_activation("""mish""" )
self.assertIsInstance(_lowerCAmelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = get_activation("""gelu""" )
self.assertIsInstance(_lowerCAmelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
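The tests above exercise diffusers' `get_activation` helper, which maps a string name to a torch activation module. A minimal sketch of such a name-to-module registry (a plausible reimplementation for illustration, not diffusers' actual code):

import torch
from torch import nn

_ACTIVATIONS = {
    "swish": nn.SiLU,
    "silu": nn.SiLU,
    "mish": nn.Mish,
    "gelu": nn.GELU,
    "relu": nn.ReLU,
}

def get_activation(name: str) -> nn.Module:
    try:
        return _ACTIVATIONS[name.lower()]()
    except KeyError:
        raise ValueError(f"Unsupported activation: {name!r}") from None

act = get_activation("swish")
print(act(torch.tensor(-100.0)).item())  # ~0.0: SiLU saturates for large negative inputs
print(act(torch.tensor(20.0)).item())    # ~20.0: near-identity for large positive inputs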
| 79 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
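The fast tokenizer above formats single sequences as `[CLS] A [SEP]` and pairs as `[CLS] A [SEP] B [SEP]`, with token type ids 0 for the first segment and 1 for the second. A small standalone sketch of that layout (the token ids 101/102 are the usual BERT-style values, used here only for illustration):

from typing import List, Optional

CLS_ID, SEP_ID = 101, 102  # illustrative ids; real values come from the vocab

def build_inputs(ids_a: List[int], ids_b: Optional[List[int]] = None):
    input_ids = [CLS_ID] + ids_a + [SEP_ID]
    token_type_ids = [0] * len(input_ids)
    if ids_b is not None:
        input_ids += ids_b + [SEP_ID]
        token_type_ids += [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids

print(build_inputs([7, 8]))       # ([101, 7, 8, 102], [0, 0, 0, 0])
print(build_inputs([7, 8], [9]))  # ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])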
| 683 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__UpperCamelCase : str = logging.getLogger(__name__)
class __UpperCamelCase :
def __init__( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = False
def _a ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if not self.initialized:
__lowercase = RagRetriever(
_lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , index=_lowerCAmelCase , init_retrieval=_lowerCAmelCase , )
__lowercase = True
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
self.retriever.index.init_index()
def _a ( self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.retriever._main_retrieve(_lowerCAmelCase , _lowerCAmelCase )
return doc_ids, retrieved_doc_embeds
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ) -> str:
"""simple docstring"""
if index is not None and index.is_initialized() and len(_lowerCAmelCase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
_lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , index=_lowerCAmelCase , init_retrieval=_lowerCAmelCase , )
__lowercase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for worker in self.retrieval_workers
] )
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__lowercase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__lowercase , __lowercase = ray.get(random_worker.retrieve.remote(_lowerCAmelCase , _lowerCAmelCase ) )
else:
__lowercase , __lowercase = self._main_retrieve(_lowerCAmelCase , _lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCAmelCase )
@classmethod
def _a ( cls : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
return super(_lowerCAmelCase , cls ).get_tokenizers(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def _a ( cls : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
__lowercase = kwargs.pop("""config""" , _lowerCAmelCase ) or RagConfig.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__lowercase = RagTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
__lowercase = rag_tokenizer.question_encoder
__lowercase = rag_tokenizer.generator
if indexed_dataset is not None:
__lowercase = """custom"""
__lowercase = CustomHFIndex(config.retrieval_vector_size , _lowerCAmelCase )
else:
__lowercase = cls._build_index(_lowerCAmelCase )
return cls(
_lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , retrieval_workers=_lowerCAmelCase , index=_lowerCAmelCase , )
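The distributed retriever above spreads index lookups across Ray actors and dispatches each query to a randomly chosen worker. A minimal, self-contained sketch of that actor-dispatch pattern (the worker body is a placeholder, and `num_workers` is an assumed setting, not a value from the class above):

import random
import ray

ray.init(ignore_reinit_error=True)

@ray.remote
class RetrievalWorker:
    def retrieve(self, query: str, n_docs: int):
        # Placeholder: a real worker would search a loaded index here.
        return [f"{query}-doc{i}" for i in range(n_docs)]

num_workers = 3  # assumed
workers = [RetrievalWorker.remote() for _ in range(num_workers)]

def retrieve(query: str, n_docs: int = 2):
    # Select a random retrieval actor, as the class above does.
    worker = workers[random.randint(0, len(workers) - 1)]
    return ray.get(worker.retrieve.remote(query, n_docs))

print(retrieve("what is rag?"))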
| 80 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
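Odd-even (brick) sort alternates passes over even-indexed and odd-indexed pairs; each pass touches disjoint pairs, which is what makes the algorithm easy to parallelize. A quick standalone sanity check of the function defined above:

assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert odd_even_sort([]) == []
assert odd_even_sort([2, 1]) == [1, 2]
print("odd_even_sort sanity checks passed")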
| 683 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case ( self : Union[str, Any] ) -> Tuple:
__snake_case : List[Any] = 1
__snake_case : Any = 3
__snake_case : str = (32, 32)
__snake_case : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase )
return image
@property
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__snake_case : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def __snake_case ( self : List[str] ) -> Optional[Any]:
torch.manual_seed(0 )
__snake_case : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __snake_case ( self : int ) -> Any:
torch.manual_seed(0 )
__snake_case : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(lowerCamelCase )
@property
def __snake_case ( self : int ) -> Any:
def extract(*lowerCamelCase : int , **lowerCamelCase : Any ):
class a :
"""simple docstring"""
def __init__( self : Tuple ) -> int:
__snake_case : List[Any] = torch.ones([0] )
def __snake_case ( self : List[Any] , lowerCamelCase : List[str] ) -> Dict:
self.pixel_values.to(lowerCamelCase )
return self
return Out()
return extract
def __snake_case ( self : Tuple ) -> Tuple:
__snake_case : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case : Optional[Any] = self.dummy_cond_unet
__snake_case : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase )
__snake_case : List[Any] = self.dummy_vae
__snake_case : Optional[int] = self.dummy_text_encoder
__snake_case : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__snake_case : List[Any] = 77
__snake_case : int = self.dummy_image.to(lowerCamelCase )
__snake_case : str = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__snake_case : Dict = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=self.dummy_extractor , )
__snake_case : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCamelCase )
__snake_case : Any = alt_pipe.to(lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : str = "A painting of a squirrel eating a burger"
__snake_case : Optional[int] = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__snake_case : Any = alt_pipe(
[prompt] , generator=lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCamelCase , )
__snake_case : Dict = output.images
__snake_case : Tuple = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__snake_case : Dict = alt_pipe(
[prompt] , generator=lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCamelCase , return_dict=lowerCamelCase , )[0]
__snake_case : int = image[0, -3:, -3:, -1]
__snake_case : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : Tuple = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __snake_case ( self : int ) -> Optional[int]:
__snake_case : Optional[int] = self.dummy_cond_unet
__snake_case : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase )
__snake_case : Optional[int] = self.dummy_vae
__snake_case : Optional[int] = self.dummy_text_encoder
__snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__snake_case : List[str] = 77
__snake_case : Optional[int] = self.dummy_image.to(lowerCamelCase )
# put models in fp16
__snake_case : str = unet.half()
__snake_case : Dict = vae.half()
__snake_case : int = bert.half()
# make sure here that pndm scheduler skips prk
__snake_case : int = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=self.dummy_extractor , )
__snake_case : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCamelCase )
__snake_case : Dict = alt_pipe.to(lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : int = "A painting of a squirrel eating a burger"
__snake_case : int = torch.manual_seed(0 )
__snake_case : int = alt_pipe(
[prompt] , generator=lowerCamelCase , num_inference_steps=2 , output_type="np" , image=lowerCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __snake_case ( self : Any ) -> List[str]:
__snake_case : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
__snake_case : Any = init_image.resize((760, 504) )
__snake_case : Optional[Any] = "BAAI/AltDiffusion"
__snake_case : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase , safety_checker=lowerCamelCase , )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
__snake_case : Dict = "A fantasy landscape, trending on artstation"
__snake_case : int = torch.manual_seed(0 )
__snake_case : Dict = pipe(
prompt=lowerCamelCase , image=lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=lowerCamelCase , output_type="np" , )
__snake_case : Optional[Any] = output.images[0]
__snake_case : Dict = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__snake_case : Any = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[Any] ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[Any] ) -> int:
__snake_case : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__snake_case : str = init_image.resize((768, 512) )
__snake_case : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
__snake_case : Any = "BAAI/AltDiffusion"
__snake_case : Any = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase , safety_checker=lowerCamelCase , )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
__snake_case : Optional[Any] = "A fantasy landscape, trending on artstation"
__snake_case : Optional[Any] = torch.manual_seed(0 )
__snake_case : Optional[Any] = pipe(
prompt=lowerCamelCase , image=lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=lowerCamelCase , output_type="np" , )
__snake_case : Union[str, Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 81 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only supports V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
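The converter above is essentially a large key-renaming pass over a state dict, moving tensors from the LDM layout (`encoder.down.{i}.block`) to the diffusers layout (`encoder.down_blocks.{i}.resnets`). A minimal sketch of that kind of prefix mapping (the rename table here is a toy example, not the full LDM-to-diffusers map):

import torch

# Toy rename table (old prefix -> new prefix).
RENAMES = {
    "encoder.down.0.block": "encoder.down_blocks.0.resnets",
    "decoder.up.0.block": "decoder.up_blocks.0.resnets",
}

def rename_state_dict(state_dict: dict) -> dict:
    new_state_dict = {}
    for key, tensor in state_dict.items():
        new_key = key
        for old, new in RENAMES.items():
            if new_key.startswith(old):
                new_key = new + new_key[len(old):]
        new_state_dict[new_key] = tensor
    return new_state_dict

old = {"encoder.down.0.block.0.conv1.weight": torch.zeros(1)}
print(list(rename_state_dict(old)))  # ['encoder.down_blocks.0.resnets.0.conv1.weight']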
| 683 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''bert'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[int]=30522 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : int=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=None , **_UpperCAmelCase : str , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
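The `OnnxConfig.inputs` mapping above declares which input dimensions are dynamic (batch and sequence, plus the choice axis for multiple-choice). The same information is what `torch.onnx.export` consumes as `dynamic_axes`; a minimal sketch with a tiny stand-in model (the model and file name are illustrative, not part of the BERT config above):

import torch
from torch import nn

class TinyClassifier(nn.Module):
    def __init__(self, vocab_size: int = 100, hidden: int = 16):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, hidden)
        self.head = nn.Linear(hidden, 2)

    def forward(self, input_ids):
        return self.head(self.emb(input_ids).mean(dim=1))

model = TinyClassifier().eval()
dummy = torch.randint(0, 100, (1, 8))
torch.onnx.export(
    model,
    (dummy,),
    "tiny.onnx",
    input_names=["input_ids"],
    output_names=["logits"],
    # Same idea as OnnxConfig.inputs: mark batch and sequence as dynamic.
    dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}, "logits": {0: "batch"}},
)
print("exported tiny.onnx")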
| 82 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 683 | 0 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, increment the count
    # for the first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
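The defaultdict above increments for one string and decrements for the other, so two anagrams leave every count at zero. The same check can be written more compactly with `collections.Counter`, a common alternative:

from collections import Counter

def _normalize(s: str) -> str:
    return s.lower().replace(" ", "")

def check_anagrams_counter(first: str, second: str) -> bool:
    # Anagrams have identical character multisets after normalization.
    return Counter(_normalize(first)) == Counter(_normalize(second))

assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("there", "their")
print("Counter-based anagram checks passed")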
| 83 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if height or width lower than this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
assert number_char > 1, "The number of character should greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
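The mosaic script converts between the corner format (xmin, ymin, xmax, ymax, normalized) used while stitching and the YOLO label format (class, x_center, y_center, width, height) written to disk. A small round-trip sketch of those two conversions (function names are my own):

def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float):
    width, height = xmax - xmin, ymax - ymin
    return xmin + width / 2, ymin + height / 2, width, height

def yolo_to_corners(xc: float, yc: float, width: float, height: float):
    return xc - width / 2, yc - height / 2, xc + width / 2, yc + height / 2

box = (0.1, 0.2, 0.5, 0.6)
yolo = corners_to_yolo(*box)
print(yolo)                    # (0.3, 0.4, 0.4, 0.4)
print(yolo_to_corners(*yolo))  # back to (0.1, 0.2, 0.5, 0.6), up to float rounding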
| 683 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
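Each element adds one `rec_insertion_sort` frame (plus up to n nested `insert_next` frames), so the call depth grows linearly with the input size and CPython's default recursion limit caps the usable list length. A quick usage check, with that caveat noted:

import sys

data = [5, 2, 9, 1, 5, 6]
rec_insertion_sort(data, len(data))
print(data)  # [1, 2, 5, 5, 6, 9]

print(sys.getrecursionlimit())  # typically 1000
# Sorting lists beyond a few hundred elements needs a higher limit:
# sys.setrecursionlimit(10_000)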
| 84 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
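The mapping function above replaces pad token ids in the labels with -100 so that PyTorch's cross-entropy (and hence the Trainer loss) ignores padded positions. A minimal standalone sketch of that masking (the pad id 0 and tensor shapes here are assumptions for illustration):

import torch
from torch.nn import functional as F

PAD_ID = 0  # assumed pad token id

labels = torch.tensor([[5, 7, PAD_ID, PAD_ID]])
masked = labels.masked_fill(labels == PAD_ID, -100)
print(masked)  # tensor([[   5,    7, -100, -100]])

logits = torch.randn(1, 4, 10)  # (batch, seq, vocab)
# cross_entropy expects (N, C, d) logits; positions labeled -100 contribute nothing
loss = F.cross_entropy(logits.transpose(1, 2), masked, ignore_index=-100)
print(loss)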
| 683 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class snake_case ( unittest.TestCase ):
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = logging.get_logger()
# the current default level is logging.WARNING
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a_ )
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_verbosity()
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE__ : Any = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(a_ )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
SCREAMING_SNAKE_CASE__ : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE__ : int = os.getenv('TRANSFORMERS_VERBOSITY' , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.log_levels[env_level_str]
SCREAMING_SNAKE_CASE__ : str = logging.get_verbosity()
self.assertEqual(
a_ , a_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
SCREAMING_SNAKE_CASE__ : List[Any] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE__ : int = logging.logging.getLogger()
with CaptureLogger(a_ ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE__ : Dict = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(a_ ) as cl:
logger.warning_advice(a_ )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a_ ) as cl:
logger.warning_advice(a_ )
self.assertEqual(cl.out , msg + '\n' )
def _a ( ):
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 85 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["Accelerate configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
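# Typical invocation, assuming this parser is registered under the `accelerate` CLI:
#   $ accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# or, when running this module directly:
#   $ python -m accelerate.commands.env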
| 683 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix ,row: int ,column: int ,n: int ):
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ):
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ):
    """simple docstring"""
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 ,10 ):
        if is_safe(grid ,row ,column ,digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ):
"""simple docstring"""
for row in grid:
for cell in row:
            print(cell ,end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
            print('Cannot find a solution.')
| 86 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def get_config(model_name ):
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer ,num_labels=10_00 ,id2label=id2label ,label2id=label2id ,)
    return config
def rename_key(name ):
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' ,'''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' ,'''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name ,pytorch_dump_folder_path ,push_to_hub=False ):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name ,pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} ,model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=True ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=True ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image ,return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values ,pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''' ,logits[0, :3] )
    print('''Predicted class:''' ,model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits ,outputs.logits ,atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
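# Example CLI run (hypothetical output path; the model name is the argparse default above):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50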
| 683 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowerCamelCase : Dict = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    '''simple docstring'''
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
        self.post_init()
    def post_init(self):
        '''simple docstring'''
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('''llm_int8_threshold must be a float''')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('''bnb_4bit_quant_type must be a string''')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
            '''0.39.0'''):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')
    def is_quantizable(self):
        '''simple docstring'''
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        '''simple docstring'''
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        '''simple docstring'''
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        '''simple docstring'''
        with open(json_file_path, '''w''', encoding='''utf-8''') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '''\n'''
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
        return output
    def __repr__(self):
        '''simple docstring'''
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string(self, use_diff: bool = True) -> str:
        '''simple docstring'''
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        '''simple docstring'''
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
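# Minimal usage sketch for the class above (assumes bitsandbytes>=0.39.0 is installed
# whenever load_in_4bit=True, as enforced by post_init):
#   cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   cfg.quantization_method()  # -> "nf4"
#   cfg.to_diff_dict()         # serializes only the non-default fields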
| 87 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int ) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
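# Worked example of the two chains: 44 -> 4**2 + 4**2 = 32 -> 13 -> 10 -> 1 (ends at 1),
# while 85 -> 8**2 + 5**2 = 89 (ends at 89), so 85 belongs to the chain that is counted.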
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_00_00_00 ) -> int:
    for i in range(1 ,number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""),
}) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len)
        }
| 88 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year: int ,month: int ,day: int ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
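# Worked example (2000-01-01): century = 20, century_anchor = 2, centurian = 0,
# dooms_day = (0 + 0 + 0 + 2) % 7 = 2; 2000 is a leap year, so day_anchor =
# DOOMSDAY_LEAP[0] = 4 and week_day = (2 + 1 - 4) % 7 = 6 -> "Saturday".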
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features: np.ndarray ,target: np.ndarray ,test_features: np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 ,random_state=42 )
    xgb.fit(features ,target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) ,1 )
    return predictions
def main() -> None:
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data ,target ,test_size=0.25 ,random_state=1 )
    predictions = xgboost(x_train ,y_train ,x_test )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(y_test ,predictions )}''' )
    print(F'''Mean Square Error : {mean_squared_error(y_test ,predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 89 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        vqa_pipeline = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        examples = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
    def run_pipeline_test( self , vqa_pipeline , examples ):
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{'''score''': ANY(float ), '''answer''': ANY(str )}],
                [{'''score''': ANY(float ), '''answer''': ANY(str )}],
            ] , )
@require_torch
    def test_small_model_pt( self ):
        vqa_pipeline = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        question = '''How many cats are there?'''
        outputs = vqa_pipeline(image=image , question='''How many cats are there?''' , top_k=2 )
        self.assertEqual(
            outputs , [{'''score''': ANY(float ), '''answer''': ANY(str )}, {'''score''': ANY(float ), '''answer''': ANY(str )}] )
        outputs = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            outputs , [{'''score''': ANY(float ), '''answer''': ANY(str )}, {'''score''': ANY(float ), '''answer''': ANY(str )}] )
@slow
@require_torch
    def test_large_model_pt( self ):
        vqa_pipeline = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        question = '''How many cats are there?'''
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
        outputs = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
        outputs = vqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
    def test_small_model_tf( self ):
pass
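# Interactive sketch mirroring the slow test above (same checkpoint; the image path is
# hypothetical):
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)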
| 683 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def load_vocab_file(vocab_file ):
    with open(vocab_file , '''r''' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index: int ) -> str:
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token: str ) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ) -> int:
        return len(self._id_to_token )
    def get_vocab( self ) -> dict:
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token: str ) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index: int ) -> str:
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
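    # Layout sketch for the method above (assuming <cls> and <eos> are present in the
    # vocab file, as in the facebook/esm2 checkpoints):
    #   single sequence:   <cls> seq <eos>
    #   pair of sequences: <cls> seq_0 <eos> seq_1 <eos>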
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        vocab_file = os.path.join(save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(vocab_file , '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ) -> int:
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens: bool = False ) -> int:
        return super()._add_tokens(new_tokens , special_tokens=True )
| 90 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # reduce TensorFlow log spam
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
'''simple docstring'''
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    task_type: Optional[str] = field(
        default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    labels: Optional[str] = field(
        default=None , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
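# Example launch (hypothetical paths; the flags mirror the dataclass fields above):
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner-out --do_train --do_eval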
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 91 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params ,i ,prefix ):
    return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def t5x_mlp_lookup(params ,i ,prefix ,split_mlp_wi=False ):
    if split_mlp_wi:
        wi_0 = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params ,i ,prefix ,layer_name ):
    return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_t5x_to_pytorch(variables ,*, num_layers ,is_encoder_only ,scalable_attention = False ):
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' ,split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_attention_layer_norm''' )
        k , o , q , v = t5x_attention_lookup(old ,i ,'''encoder''' ,'''attention''' )
        new[f'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[f'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_mlp_layer_norm''' )
        wi , wo = t5x_mlp_lookup(old ,i ,'''encoder''' ,split_mlp_wi )
        new[f'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[f'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'''encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
                old ,i ,'''encoder''' ).T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old ,0 ,'''encoder''' ).T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old ,0 ,'''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
            k , o , q , v = t5x_attention_lookup(old ,i ,'''decoder''' ,'''self_attention''' )
            new[f'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
            k , o , q , v = t5x_attention_lookup(old ,i ,'''decoder''' ,'''encoder_decoder_attention''' )
            new[f'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_mlp_layer_norm''' )
            wi , wo = t5x_mlp_lookup(old ,i ,'''decoder''' ,split_mlp_wi )
            new[f'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[f'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'''decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(old ,i ,'''decoder''' ).T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict(converted_params ,is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_t5x_weights_in_t5(model ,config ,t5x_checkpoint_path ,is_encoder_only ,scalable_attention ):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    converted = convert_t5x_to_pytorch(
        variables ,num_layers=config.num_layers ,is_encoder_only=is_encoder_only ,scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted ,is_encoder_only )
    model.load_state_dict(state_dict ,strict=True )
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path ,config_file ,pytorch_dump_path ,is_encoder_only = False ,scalable_attention = False ,):
    config = MT5Config.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model ,config ,t5x_checkpoint_path ,is_encoder_only ,scalable_attention )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 683 | 0 |
'''simple docstring'''
def get_demo_graph(index: int ) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]] ) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at: int , parent: int , bridges: list[tuple[int, int]] , id_: int ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
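
    # A quick demonstration using the helpers defined above:
    print(f"Bridges of demo graph 0: {compute_bridges(get_demo_graph(0))}")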
| 92 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
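

# Each prime partition of n maps to a unique product of its prime parts, so counting
# distinct products counts distinct partitions: partition(7) == {7, 10, 12}, matching
# the three partitions 7, 5 + 2 and 3 + 2 + 2.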
def solution(number_unique_partitions: int = 50_00) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
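
# A minimal usage sketch (the model ids below are illustrative examples): thanks to
# the lazy module above, the heavy torch import only happens on first attribute access.
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )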
| 93 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''')
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''').to('''cuda:0''')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''').to('''cuda:0''')
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''')
        sas_model.load_state_dict(save_dict['''model'''])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''')['''train''']
        wikiaab_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset('''eli5''', name='''LFQA_reddit''')
    elia_train = elia['''train_eli5''']
    elia_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28))
    eli5_train_q_index = faiss.IndexFlatIP(1_28)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=2_56, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=10_24, device='''cuda:0''', )[0]
    # `support_list` is a module-level global assigned below before the button
    # handler ever calls this function (streamlit re-runs the script top to bottom).
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, value=None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({F'''{self.value}''': (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """simple docstring"""

    def __init__(self, root=None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node, new_children) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''')
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
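

# A small usage sketch for the class above:
#   t = BinarySearchTree()
#   t.insert(8, 3, 6, 1, 10)
#   t.find_kth_smallest(2, t.root)  # -> 3, since sorted order is 1, 3, 6, 8, 10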
def postorder(curr_node) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print('''The value 6 exists''')
    else:
        print('''The value 6 doesn\'t exist''')

    if t.search(-1) is not None:
        print('''The value -1 exists''')
    else:
        print('''The value -1 doesn\'t exist''')

    if not t.empty():
        print('''Max Value: ''', t.get_max().value)  # type: ignore
        print('''Min Value: ''', t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()

device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''

model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('''generated.png''')
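
# A hypothetical invocation of this script (the file name is a placeholder):
#   python sd_ipex_inference.py --dpm --steps 20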
| 95 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs

    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 22050 , time_duration = 5.0 , frequency = 220 , ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-3
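
# A small sketch using the config class above: `hidden_size` resolves to `d_model`
# through `attribute_map`.
#   config = WhisperConfig(encoder_layers=2, decoder_layers=2, d_model=128)
#   assert config.hidden_size == 128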
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = """roberta"""
elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = """transformer"""

    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                    compressed_sd[f"""{prefix}.h.{std_idx}.{layer}.{w}"""] = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                    compressed_sd[f"""{prefix}.encoder.layer.{std_idx}.{layer}.{w}"""] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
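
    # A hypothetical run of this extraction script (the file name is a placeholder):
    #   python extract.py --model_type roberta --model_name roberta-large \
    #       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform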
| 683 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence
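

# For example: dutch_national_flag_sort([2, 0, 1, 0, 2]) -> [0, 0, 1, 2, 2].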
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f"{dutch_national_flag_sort(unsorted)}")
| 97 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    """simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, expected_filtered_dist, atol=1E-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float('''inf''')])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
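
# Outside the test harness, the same processors chain like this (a sketch;
# `input_ids` and `scores` are arbitrary jnp arrays of matching batch size):
#   processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
#   scores = processors(input_ids, scores, cur_len=cur_len)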
| 683 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    """simple docstring"""

    status_code = 2_0_0
    headers = {'Content-Length': '100'}
    cookies = {}

    def iter_content(self, **kwargs):
        '''simple docstring'''
        return [bytes(CONTENT, '''utf-8''')]
def mock_request(*args, **kwargs):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('''urls_type''', [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """simple docstring"""
    import requests

    monkeypatch.setattr(requests, '''request''', mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {'''train''': url}
    dataset_name = '''dummy'''
    cache_subdir = '''downloads'''
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('''.json''')
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''', [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """simple docstring"""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {'''train''': filename}
    dataset_name = '''dummy'''
    cache_dir = xz_file.parent
    extracted_subdir = '''extracted'''
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """simple docstring"""
    assert path.endswith('''.jsonl''')
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode('''utf-8'''))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''', ['''tar_jsonl_path''', '''zip_jsonl_path'''])
def test_iter_archive_path(archive_jsonl, request):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''', ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''])
def test_iter_archive_file(archive_nested_jsonl, request):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def a__ ( lowercase : Tuple ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase ), start=1 ):
assert os.path.basename(lowercase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
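# A minimal usage sketch of the two iteration helpers exercised above; the
# archive and directory paths here are placeholders, not fixtures from the suite.
from datasets import DownloadManager

dl_manager = DownloadManager()
# iter_archive streams (relative_path, file_object) pairs without extracting to disk
for path, f in dl_manager.iter_archive("data/archive.tar.gz"):  # hypothetical archive
    header = f.readline()
# iter_files walks a local directory, skipping hidden files
for file_path in dl_manager.iter_files("data/"):  # hypothetical directory
    print(file_path)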
| 98 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
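# For reference, a short sketch of how the inspection functions tested above are
# typically called; dataset names follow the parametrized cases, outputs are illustrative.
from datasets import get_dataset_config_names, get_dataset_split_names

config_names = get_dataset_config_names("paws")
# e.g. ["labeled_final", "labeled_swap", "unlabeled_final"]
split_names = get_dataset_split_names("paws", config_name="labeled_final")
# e.g. ["train", "test", "validation"]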
| 683 | 0 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
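# A minimal sketch of calling the processor above with both modalities; the
# checkpoint name and the dummy waveform are assumptions for illustration.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")  # assumed checkpoint
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in waveform
inputs = processor(audio=speech, sampling_rate=16_000, text="hello world", return_tensors="pt")
# per __call__ above, inputs holds the log-mel "input_features" plus the tokenized "labels"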
| 99 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 683 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
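# A tiny stand-alone sketch of the greedy BPE merging that the toy vocabulary above
# encodes, showing why "lower" tokenizes to ["low", "er</w>"]. This is a simplified
# merge loop for illustration, not BioGptTokenizer's actual implementation.
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # highest-priority merges from the toy table
tokens = ["l", "o", "w", "e", "r</w>"]  # "lower" split into symbols with an end-of-word marker
for a, b in merges:
    i = 0
    while i < len(tokens) - 1:
        if (tokens[i], tokens[i + 1]) == (a, b):
            tokens[i : i + 2] = [a + b]  # apply the merge in place
        else:
            i += 1
print(tokens)  # ['low', 'er</w>'], matching the expected bpe_tokens in the test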
| 100 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
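# Rough sketch of the laziness this pattern buys: attribute access triggers the real
# submodule import, so importing the package stays cheap. This is a simplified
# stand-in for transformers' _LazyModule, not its actual code.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")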
| 683 | 0 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 101 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 683 | 0 |
"""simple docstring"""
def add(first, second):
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(f'''{add(first, second) = }''')
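# Worked trace of the carry loop above for add(5, 9). Note the loop assumes
# non-negative inputs: with Python's unbounded integers, a negative operand keeps
# producing a nonzero shifted carry and the loop would never terminate.
# first = 0b0101 (5), second = 0b1001 (9)
# iter 1: c = 0b0001, first = 0b1100 (12), second = 0b0010
# iter 2: c = 0b0000, first = 0b1110 (14), second = 0b0000 -> loop exits
# add(5, 9) == 14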
| 102 |
'''simple docstring'''
def odd_even_sort(input_list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
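# Quick sanity check of the odd-even (brick) sort above: each round alternates
# compare-swaps on even and odd index pairs until a full pass makes no swap.
# The example input is assumed for illustration, not taken from the source.
print(odd_even_sort([5, 3, 8, 1]))  # [1, 3, 5, 8]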
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet'''] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet_fast'''] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_fnet'''] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
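# Hypothetical invocation of the converter above; both paths are placeholders,
# and the script filename is assumed:
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers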
| 683 | 0 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self, value, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
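# Minimal round-trip sketch for the feature above; assumes Pillow is installed
# and uses a dummy array purely for illustration.
import numpy as np
from datasets import Image

feature = Image()
encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))  # {"path": None, "bytes": <PNG bytes>}
decoded = feature.decode_example(encoded)  # back to a 4x4 PIL.Image.Image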
| 104 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 683 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, '''src''', '''transformers''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend(' if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')
        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')
        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')
        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')
        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 105 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cv2.imwrite(f'''{file_root}.jpg''', new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj)
        with open(f'''{file_root}.txt''', '''w''') as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'''{label_name}.jpg''')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0, ) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCAmelCase__ : bool , lowerCAmelCase__ : bool ) -> str:
'''simple docstring'''
def run_func(lowerCAmelCase__ : Tuple ):
@wraps(lowerCAmelCase__ )
def run_in_eager_mode(*lowerCAmelCase__ : int , **lowerCAmelCase__ : Optional[Any] ):
return func(*lowerCAmelCase__ , **lowerCAmelCase__ )
@wraps(lowerCAmelCase__ )
@tf.function(experimental_compile=lowerCAmelCase__ )
def run_in_graph_mode(*lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : List[Any] ):
return func(*lowerCAmelCase__ , **lowerCAmelCase__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
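# A minimal usage sketch of the decorator factory above (the names `forward`,
# `model` and `input_ids` below are illustrative, not part of this module):
# the wrapped callable runs eagerly when `do_eager_mode` is True and as a
# compiled `tf.function` otherwise; eager mode cannot be combined with XLA.
#
#     @run_with_tf_optimizations(False, True)  # graph mode with XLA
#     def forward():
#         return model(input_ids, training=False)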
def lowerCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> ["tf.Tensor"]:
'''simple docstring'''
A = random.Random()
A = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
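# For example (illustrative sizes): batch_size=2, sequence_length=4 and
# vocab_size=100 yield a (2, 4) integer tensor of random token ids, used as
# dummy input for the benchmarked models below.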
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : TensorFlowBenchmarkArguments
A_ : PretrainedConfig
A_ : str = "TensorFlow"
@property
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return tf.__version__
def __UpperCamelCase ( self : int , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int ) -> float:
# initialize GPU on separate process
A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def __UpperCamelCase ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int ) -> float:
A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int ) -> Callable[[], None]:
A = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
A = (
hasattr(__UpperCamelCase , 'architectures' )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
A = __import__('transformers' , fromlist=[model_class] )
A = getattr(__UpperCamelCase , __UpperCamelCase )
A = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
A = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
A = config.vocab_size if hasattr(__UpperCamelCase , 'vocab_size' ) else config.encoder.vocab_size
A = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
A = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int ) -> Callable[[], None]:
A = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
A = (
hasattr(__UpperCamelCase , 'architectures' )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
A = __import__('transformers' , fromlist=[model_class] )
A = getattr(__UpperCamelCase , __UpperCamelCase )
A = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
A = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
A = config.vocab_size if hasattr(__UpperCamelCase , 'vocab_size' ) else config.encoder.vocab_size
A = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
A = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
A = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
A = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
A = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
A = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[str] ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times up front to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
A = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def __UpperCamelCase ( self : Dict , __UpperCamelCase : Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
A = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
A = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
A = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
A = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
A = meminfo.used
A = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
A = None
else:
A = measure_peak_memory_cpu(__UpperCamelCase )
A = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
A = stop_memory_tracing(__UpperCamelCase )
if memory is None:
A = summary.total
else:
A = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None | 106 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
        def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
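            # -100 is the ignore_index of PyTorch's CrossEntropyLoss, so padded
            # label positions are masked out of the loss computation.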
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
_UpperCAmelCase : Optional[Any] = '''us-east-1''' # defaults region
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
__lowerCAmelCase = {
"task_name": "mnli",
"per_device_train_batch_size": 1_6,
"per_device_eval_batch_size": 1_6,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_0_0,
"save_steps": 5_5_0_0,
}
__lowerCAmelCase = {**hyperparameters, "max_steps": 1_0_0_0}
@property
    def __UpperCAmelCase ( self : List[Any] ) -> list:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __UpperCAmelCase ( self : List[str] ) -> str:
return f'{self.framework}-transfromers-test'
@property
def __UpperCAmelCase ( self : Any ) -> str:
return f'./tests/sagemaker/scripts/{self.framework}'
@property
def __UpperCAmelCase ( self : Optional[int] ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def _SCREAMING_SNAKE_CASE ( __snake_case : Any ):
_A = SageMakerTestEnvironment(framework=request.cls.framework )
| 107 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def lowerCamelCase ( self : str , lowerCamelCase : str ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError()
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
raise NotImplementedError()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , lowerCamelCase : "AutoTokenizer" , lowerCamelCase : bool = False , **lowerCamelCase : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = tokenizer
_UpperCAmelCase = skip_prompt
_UpperCAmelCase = decode_kwargs
# variables used in the streaming process
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = True
def lowerCamelCase ( self : Any , lowerCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
_UpperCAmelCase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_UpperCAmelCase = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
_UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
_UpperCAmelCase = text[self.print_len :]
_UpperCAmelCase = []
_UpperCAmelCase = 0
# If the last token is a CJK character, we print the characters.
elif len(lowerCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_UpperCAmelCase = text[self.print_len :]
self.print_len += len(lowerCamelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_UpperCAmelCase = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(lowerCamelCase )
self.on_finalized_text(lowerCamelCase )
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_UpperCAmelCase = text[self.print_len :]
_UpperCAmelCase = []
_UpperCAmelCase = 0
else:
_UpperCAmelCase = """"""
_UpperCAmelCase = True
self.on_finalized_text(lowerCamelCase , stream_end=lowerCamelCase )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : bool = False ) -> Any:
"""simple docstring"""
print(lowerCamelCase , flush=lowerCamelCase , end="""""" if not stream_end else None )
def lowerCamelCase ( self : int , lowerCamelCase : Any ) -> int:
"""simple docstring"""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
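        # Illustrative check (not part of the method): ord("中") == 0x4E2D lies
        # in the main CJK block above, so it yields True; ord("a") == 0x61 does not.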
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : "AutoTokenizer" , lowerCamelCase : bool = False , lowerCamelCase : Optional[float] = None , **lowerCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
super().__init__(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = Queue()
_UpperCAmelCase = None
_UpperCAmelCase = timeout
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : bool = False ) -> Optional[int]:
"""simple docstring"""
self.text_queue.put(lowerCamelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
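    # Typical consumption sketch for this streamer (with illustrative names):
    # start `model.generate(..., streamer=streamer)` in a `threading.Thread`,
    # then iterate over the streamer on the main thread. `__next__` below
    # blocks on the queue until text arrives and raises StopIteration once the
    # stop signal is received.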
def __iter__( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return self
def lowerCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value | 108 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
    _UpperCamelCase : Optional[int] = {int(k ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
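# Illustrative renames produced by the rules above (key names are examples):
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"
#   "head.fc.weight"        -> "classifier.1.weight"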
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
a = TypeVar("T")
class __a ( Generic[T] ):
def __init__( self : Tuple ,lowerCamelCase : T ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = data
__SCREAMING_SNAKE_CASE = self
__SCREAMING_SNAKE_CASE = 0
class __a ( Generic[T] ):
def __init__( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : T ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DisjointSetTreeNode(lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : T ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.map[data]
if elem_ref != elem_ref.parent:
__SCREAMING_SNAKE_CASE = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : DisjointSetTreeNode[T] ,lowerCamelCase : DisjointSetTreeNode[T] ):
'''simple docstring'''
if nodea.rank > nodea.rank:
__SCREAMING_SNAKE_CASE = nodea
else:
__SCREAMING_SNAKE_CASE = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : T ,lowerCamelCase : T ):
'''simple docstring'''
self.link(self.find_set(lowerCamelCase ) ,self.find_set(lowerCamelCase ) )
class __a ( Generic[T] ):
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : T ):
'''simple docstring'''
if node not in self.connections:
__SCREAMING_SNAKE_CASE = {}
def UpperCAmelCase__ ( self : str ,lowerCamelCase : T ,lowerCamelCase : T ,lowerCamelCase : int ):
'''simple docstring'''
self.add_node(lowerCamelCase )
self.add_node(lowerCamelCase )
__SCREAMING_SNAKE_CASE = weight
__SCREAMING_SNAKE_CASE = weight
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
__SCREAMING_SNAKE_CASE = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCamelCase )
# MST generation
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = edges[index]
index += 1
__SCREAMING_SNAKE_CASE = disjoint_set.find_set(lowerCamelCase )
__SCREAMING_SNAKE_CASE = disjoint_set.find_set(lowerCamelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
disjoint_set.union(lowerCamelCase ,lowerCamelCase )
return graph
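# Minimal usage sketch (illustrative values; the original, pre-obfuscation
# names GraphUndirectedWeighted / add_edge / kruskal are used for readability):
#
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 3)
#     mst = g.kruskal()  # keeps edges (1, 2) and (2, 3); (1, 3) would close a cycle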
| 109 |
'''simple docstring'''
_UpperCAmelCase : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def snake_case__ ( UpperCamelCase ) -> int:
_UpperCamelCase : Any = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
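# For example: 86 -> 8**2 + 6**2 = 100. Numbers below 100000 need a single
# table lookup; larger numbers are folded five digits at a time.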
# Every chain eventually reaches either 89 or 1.
# The chain ending in 89 is seeded from member 58, since declaring it first
# requires the least number of iterations to classify all remaining members.
# The other chain ends with 1 and contains only the single element 1.
# So 58 and 1 are the values marked at the start.
# A dictionary was replaced with an array to speed up the solution.
_UpperCAmelCase : list[bool | None] = [None] * 10000000
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = False
def snake_case__ ( UpperCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) )
_UpperCamelCase : Tuple = number_chain
while number < 10_00_00_00:
_UpperCamelCase : int = number_chain
number *= 10
return number_chain
def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int:
for i in range(1 ,UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
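# Illustrative wildcard expansion: a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj"
# entry above, and the "*" in "encoder.layers.*.attention.k_proj" is replaced
# by the layer index ("3") recovered from the original key name.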
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
SCREAMING_SNAKE_CASE_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ = value
else:
SCREAMING_SNAKE_CASE_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ = hf_model.feature_extractor
SCREAMING_SNAKE_CASE_ = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE_ = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE_ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE_ = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE_ = '''bias'''
elif "weight" in name:
SCREAMING_SNAKE_CASE_ = '''weight'''
else:
SCREAMING_SNAKE_CASE_ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE_ = name.split('.' )
SCREAMING_SNAKE_CASE_ = int(items[0] )
SCREAMING_SNAKE_CASE_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = full_name.split('adaptor.' )[-1]
SCREAMING_SNAKE_CASE_ = name.split('.' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE_ = int(items[1] )
else:
SCREAMING_SNAKE_CASE_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE_ = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE_ = value
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def lowercase ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = emb.weight.shape
SCREAMING_SNAKE_CASE_ = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = emb.weight.data
return lin_layer
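# This mirrors weight tying: the LM head is a bias-free Linear layer whose
# weight matrix is shared with the token embedding, so the output logits are
# effectively hidden_states @ emb.weight.T.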
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , add_adapter=SCREAMING_SNAKE_CASE , adapter_stride=SCREAMING_SNAKE_CASE , adapter_kernel_size=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , output_hidden_size=SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE )
# load model
SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
SCREAMING_SNAKE_CASE_ = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE_ = WavaVecaModel(SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
# load decoder weights
SCREAMING_SNAKE_CASE_ = MBartForCausalLM(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
SCREAMING_SNAKE_CASE_ = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = MBartaaTokenizer(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE_ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_ = '''mbart50'''
SCREAMING_SNAKE_CASE_ = '''wav2vec2'''
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_ = 25_00_04
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_ = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 205 |
'''simple docstring'''
_UpperCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> str:
assert len(str(UpperCamelCase ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
_UpperCamelCase : Any = year // 1_00
_UpperCamelCase : List[Any] = (5 * (century % 4) + 2) % 7
_UpperCamelCase : Tuple = year % 1_00
_UpperCamelCase : Optional[int] = centurian % 12
_UpperCamelCase : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCamelCase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCamelCase : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
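    # Worked check (2000-01-04, a leap year): century anchor = (5*(20%4)+2)%7 = 2,
    # the year anchor stays 2, and January's leap-year doomsday entry is 4, so
    # (2 + 4 - 4) % 7 = 2 -> "Tuesday".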
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a_ )
class __UpperCamelCase ( a_ ):
'''simple docstring'''
__magic_name__ = field(default="language-modeling" ,metadata={"include_in_asdict_even_if_is_default": True} )
__magic_name__ = Features({"text": Value("string" )} )
__magic_name__ = Features({} )
__magic_name__ = "text"
@property
def _UpperCAmelCase ( self ):
return {self.text_column: "text"} | 113 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
| 683 | 0 |
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode( UpperCAmelCase ) -> str:
    """simple docstring"""
    encoded = ''''''
    for letter in UpperCAmelCase.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
    return encoded
def decode( UpperCAmelCase ) -> str:
    """simple docstring"""
    if set(UpperCAmelCase ) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    decoded = ''''''
    for word in UpperCAmelCase.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
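# Round-trip illustration (derived from the tables above):
# >>> encode("hello")
# 'AABBBAABAAABABAABABAABBAB'
# >>> decode(encode("hello"))
# 'hello'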
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_lowerCAmelCase: Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
_lowerCAmelCase: int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class lowercase_ (a_ ):
snake_case ='whisper'
snake_case =['past_key_values']
snake_case ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowercase_=51865 , lowercase_=80 , lowercase_=6 , lowercase_=4 , lowercase_=6 , lowercase_=4 , lowercase_=1536 , lowercase_=1536 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=50257 , lowercase_=True , lowercase_=True , lowercase_="gelu" , lowercase_=256 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=False , lowercase_=1500 , lowercase_=448 , lowercase_=50256 , lowercase_=50256 , lowercase_=50256 , lowercase_=None , lowercase_=[220, 50256] , lowercase_=False , lowercase_=256 , lowercase_=False , lowercase_=0.05 , lowercase_=10 , lowercase_=2 , lowercase_=0.0 , lowercase_=10 , lowercase_=0 , lowercase_=7 , **lowercase_ , ) -> Any:
a__ =vocab_size
a__ =num_mel_bins
a__ =d_model
a__ =encoder_layers
a__ =encoder_attention_heads
a__ =decoder_layers
a__ =decoder_attention_heads
a__ =decoder_ffn_dim
a__ =encoder_ffn_dim
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =activation_function
a__ =init_std
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =use_cache
a__ =encoder_layers
a__ =scale_embedding # scale factor will be sqrt(d_model) if True
a__ =max_source_positions
a__ =max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a__ =classifier_proj_size
a__ =use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ =apply_spec_augment
a__ =mask_time_prob
a__ =mask_time_length
a__ =mask_time_min_masks
a__ =mask_feature_prob
a__ =mask_feature_length
a__ =mask_feature_min_masks
a__ =median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class lowercase_ (a_ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
a__ =OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
])
if self.use_past:
a__ ={0: '''batch'''}
else:
a__ ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='inputs')
return common_inputs
def __UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , lowercase_ = 22050 , lowercase_ = 5.0 , lowercase_ = 220 , ) -> Mapping[str, Any]:
a__ =OrderedDict()
a__ =OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
a__ =encoder_inputs['''input_features'''].shape[2]
a__ =encoder_sequence_length // 2 if self.use_past else seq_length
a__ =super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case)
a__ =encoder_inputs.pop('input_features')
a__ =decoder_inputs.pop('decoder_input_ids')
if "past_key_values" in decoder_inputs:
a__ =decoder_inputs.pop('past_key_values')
return dummy_inputs
@property
def __UpperCamelCase ( self) -> float:
return 1e-3
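# Hedged usage sketch (an addition, not part of the original record). It assumes
# the mangled class above corresponds to the standard `WhisperConfig` from
# `transformers`; the public names below follow that API, not the mangled ones.
from transformers import WhisperConfig

tiny_whisper_config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
# The `attribute_map` defined above aliases `hidden_size` to `d_model`.
assert tiny_whisper_config.hidden_size == tiny_whisper_config.d_model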
| 20 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
    _UpperCamelCase : List[Any] = checkpoints.load_t5x_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
    _UpperCamelCase : int = MT5Config.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
        _UpperCamelCase : Optional[int] = UMT5EncoderModel(UpperCamelCase )
else:
        _UpperCamelCase : Optional[int] = UMT5ForConditionalGeneration(UpperCamelCase )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
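# Added usage note: assuming this record de-mangles to the stock
# `convert_t5x_checkpoint_to_pytorch.py` script in `transformers`, it is run
# from the command line with the flags defined above; the paths below are
# placeholders, not real locations.
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention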
| 683 | 0 |
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    """Greatest common divisor of ``a`` and ``b`` via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of ``a`` modulo ``m`` (extended Euclid)."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
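# Added self-check (not in the original record): for coprime a and m, the
# inverse satisfies (a * find_mod_inverse(a, m)) % m == 1.
if __name__ == "__main__":
    assert gcd(24, 40) == 8
    assert find_mod_inverse(7, 26) == 15  # 7 * 15 == 105 == 4 * 26 + 1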
| 399 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
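# Added sanity check (not in the original record): partition(7) encodes the
# prime summations 7 = 7, 2 + 5, and 2 + 2 + 3 as the products {7, 10, 12}.
assert partition(7) == {7, 10, 12}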
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase_:
'''simple docstring'''
@staticmethod
def UpperCAmelCase_ ( *__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
pass
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_lowerCAmelCase = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : List[str] = pipeline(
"""document-question-answering""" ,model=_snake_case ,tokenizer=_snake_case ,image_processor=_snake_case )
lowerCAmelCase__ : Union[str, Any] = INVOICE_URL
lowerCAmelCase__ : Dict = list(zip(*apply_tesseract(load_image(_snake_case ) ,_snake_case ,"""""" ) ) )
lowerCAmelCase__ : Any = '''What is the placebo?'''
lowerCAmelCase__ : Optional[int] = [
{
'''image''': load_image(_snake_case ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : int = dqa_pipeline(_snake_case ,top_k=2 )
self.assertEqual(
_snake_case ,[
[
{"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case ), """start""": ANY(_snake_case ), """end""": ANY(_snake_case )},
{"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case ), """start""": ANY(_snake_case ), """end""": ANY(_snake_case )},
]
]
* 3 ,)
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : Tuple = pipeline("""document-question-answering""" ,model="""hf-internal-testing/tiny-random-layoutlmv2""" )
lowerCAmelCase__ : int = INVOICE_URL
lowerCAmelCase__ : int = '''How many cats are there?'''
lowerCAmelCase__ : Tuple = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowerCAmelCase__ : str = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(nested_simplify(_snake_case ,decimals=4 ) ,_snake_case )
lowerCAmelCase__ : List[Any] = dqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(nested_simplify(_snake_case ,decimals=4 ) ,_snake_case )
        # This image contains no text for the OCR step to detect, so layoutlmv2 should
        # fail and return an empty answer
lowerCAmelCase__ : int = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowerCAmelCase__ : Optional[int] = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(_snake_case ,[] )
        # We can optionally pass the words and bounding boxes directly
lowerCAmelCase__ : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Any = dqa_pipeline(image=_snake_case ,question=_snake_case ,words=_snake_case ,boxes=_snake_case ,top_k=2 )
self.assertEqual(_snake_case ,[] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : List[str] = pipeline(
"""document-question-answering""" ,model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" ,revision="""9977165""" ,)
lowerCAmelCase__ : Any = INVOICE_URL
lowerCAmelCase__ : Union[str, Any] = '''What is the invoice number?'''
lowerCAmelCase__ : Optional[Any] = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16},
] ,)
lowerCAmelCase__ : Tuple = dqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16},
] ,)
lowerCAmelCase__ : Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[
{"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 ,)
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : int = pipeline(
"""document-question-answering""" ,model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" ,revision="""9977165""" ,max_seq_len=50 ,)
lowerCAmelCase__ : Any = INVOICE_URL
lowerCAmelCase__ : List[Any] = '''What is the invoice number?'''
lowerCAmelCase__ : Any = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] ,)
lowerCAmelCase__ : Optional[int] = dqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] ,)
lowerCAmelCase__ : Tuple = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[
{"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" ,revision="""3dc6de3""" ,add_prefix_space=_snake_case )
lowerCAmelCase__ : Optional[int] = pipeline(
"""document-question-answering""" ,model="""impira/layoutlm-document-qa""" ,tokenizer=_snake_case ,revision="""3dc6de3""" ,)
lowerCAmelCase__ : str = INVOICE_URL
lowerCAmelCase__ : str = '''What is the invoice number?'''
lowerCAmelCase__ : List[Any] = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
] ,)
lowerCAmelCase__ : List[Any] = dqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
] ,)
lowerCAmelCase__ : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 ,)
lowerCAmelCase__ : Dict = list(zip(*apply_tesseract(load_image(_snake_case ) ,_snake_case ,"""""" ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ : List[str] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" ,revision="""3dc6de3""" ,add_prefix_space=_snake_case )
lowerCAmelCase__ : Union[str, Any] = pipeline(
"""document-question-answering""" ,model="""impira/layoutlm-document-qa""" ,tokenizer=_snake_case ,revision="""3dc6de3""" ,max_seq_len=50 ,)
lowerCAmelCase__ : Optional[int] = INVOICE_URL
lowerCAmelCase__ : Dict = '''What is the invoice number?'''
lowerCAmelCase__ : List[Any] = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] ,)
lowerCAmelCase__ : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
[
{"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 ,)
lowerCAmelCase__ : Optional[int] = list(zip(*apply_tesseract(load_image(_snake_case ) ,_snake_case ,"""""" ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ : Any = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_snake_case ,decimals=4 ) ,[
{"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] ,)
@slow
@require_torch
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Dict = pipeline(
"""document-question-answering""" ,model="""naver-clova-ix/donut-base-finetuned-docvqa""" ,tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) ,feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" ,)
lowerCAmelCase__ : str = INVOICE_URL
lowerCAmelCase__ : Tuple = '''What is the invoice number?'''
lowerCAmelCase__ : Union[str, Any] = dqa_pipeline(image=_snake_case ,question=_snake_case ,top_k=2 )
self.assertEqual(nested_simplify(_snake_case ,decimals=4 ) ,[{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
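# Hedged usage sketch (an addition): the tests above all drive the same public
# `pipeline` API; outside a test harness the equivalent call looks like this.
# It downloads a real checkpoint and needs the vision/pytesseract extras, so it
# is guarded behind a main check. The model name and invoice URL come from the
# tests above.
if __name__ == "__main__":
    from transformers import pipeline

    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    invoice = "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
    print(dqa(image=invoice, question="What is the invoice number?", top_k=1))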
| 565 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        _UpperCamelCase : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can have the model generate answers with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
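# Added usage note: this record is a Streamlit app, so it is meant to be
# launched with `streamlit run <this_file>.py` (the standard Streamlit CLI)
# rather than executed directly with `python`.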
| 683 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
__lowerCAmelCase : Any = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
__lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": 10_24,
}
class a_ ( a_ ):
UpperCamelCase_ : List[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[Any] = ['input_ids', 'attention_mask']
UpperCamelCase_ : Optional[Any] = MvpTokenizer
def __init__( self : Dict , snake_case__ : int=None , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : str="replace" , snake_case__ : Any="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : Optional[Any]="<mask>" , snake_case__ : int=False , snake_case__ : Tuple=True , **snake_case__ : Union[str, Any] , ):
super().__init__(
_snake_case , _snake_case , tokenizer_file=_snake_case , errors=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case , **_snake_case , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _snake_case ) != add_prefix_space:
lowerCAmelCase__ = getattr(_snake_case , pre_tok_state.pop("""type""" ) )
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = pre_tok_class(**_snake_case )
lowerCAmelCase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase__ = '''post_processor'''
lowerCAmelCase__ = getattr(self.backend_tokenizer , _snake_case , _snake_case )
if tokenizer_component_instance:
lowerCAmelCase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ = tuple(state["""sep"""] )
if "cls" in state:
lowerCAmelCase__ = tuple(state["""cls"""] )
lowerCAmelCase__ = False
if state.get("""add_prefix_space""" , _snake_case ) != add_prefix_space:
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = True
if state.get("""trim_offsets""" , _snake_case ) != trim_offsets:
lowerCAmelCase__ = trim_offsets
lowerCAmelCase__ = True
if changes_to_apply:
lowerCAmelCase__ = getattr(_snake_case , state.pop("""type""" ) )
lowerCAmelCase__ = component_class(**_snake_case )
setattr(self.backend_tokenizer , _snake_case , _snake_case )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : str ):
lowerCAmelCase__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else value
lowerCAmelCase__ = value
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
lowerCAmelCase__ = kwargs.get("""is_split_into_words""" , _snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_snake_case , **_snake_case )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *snake_case__ : int , **snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = kwargs.get("""is_split_into_words""" , _snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_snake_case , **_snake_case )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] = None ):
lowerCAmelCase__ = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=None ):
lowerCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] , snake_case__ : Dict = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
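# Hedged usage sketch (an addition): assuming the class above de-mangles to the
# standard `MvpTokenizerFast`, a round trip looks like the commented lines
# below; the checkpoint name comes from the URL map earlier in this record.
# Left commented out because it downloads the tokenizer files.
#
# from transformers import MvpTokenizerFast
# tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# ids = tok("Hello world").input_ids  # bos/eos added by the post-processor
# print(tok.decode(ids, skip_special_tokens=True))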
| 644 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
        _UpperCamelCase : Node | None = None # Added in order to make deleting a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please use another.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
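# Added note: the in-order traversal above also powers the k-th smallest lookup
# (the last method of the class). On the demo values (8, 3, 6, 1, 10, 14, 13,
# 4, 7) the sorted order starts 1, 3, 4, so the 3rd smallest element is 4.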
| 683 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''') from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
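# Added sanity checks for the repaired helpers above.
assert is_prime(2) and is_prime(3) and not is_prime(9)
assert solution(6) == 13  # the first six primes are 2, 3, 5, 7, 11, 13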
if __name__ == "__main__":
print(f'''{solution() = }''')
| 274 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'whisper'
A__ : Tuple = ['past_key_values']
A__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _snake_case=51865 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=1536 , _snake_case=1536 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=50257 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=256 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=1500 , _snake_case=448 , _snake_case=50256 , _snake_case=50256 , _snake_case=50256 , _snake_case=None , _snake_case=[220, 50256] , _snake_case=False , _snake_case=256 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ) -> Any:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = num_mel_bins
_UpperCamelCase : List[str] = d_model
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Optional[int] = encoder_attention_heads
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[int] = encoder_ffn_dim
_UpperCamelCase : Any = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : List[Any] = init_std
_UpperCamelCase : Optional[int] = encoder_layerdrop
_UpperCamelCase : str = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : List[str] = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : int = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : int = mask_time_length
_UpperCamelCase : List[Any] = mask_time_min_masks
_UpperCamelCase : List[str] = mask_feature_prob
_UpperCamelCase : Optional[int] = mask_feature_length
_UpperCamelCase : Union[str, Any] = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Dict = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase : Tuple = {0: '''batch'''}
else:
_UpperCamelCase : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
return common_inputs
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 22050 , _snake_case = 5.0 , _snake_case = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = OrderedDict()
_UpperCamelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
_UpperCamelCase : int = encoder_inputs['''input_features'''].shape[2]
_UpperCamelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : str = super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = encoder_inputs.pop('''input_features''' )
_UpperCamelCase : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : List[str] = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowercase ( self ) -> float:
return 1E-3
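# --- Illustrative sketch, not part of the original file ---
# The mask_time_prob / mask_time_length / mask_time_min_masks values stored by
# the config above parameterize SpecAugment-style time masking. A minimal NumPy
# version of the idea (function name and exact sampling are assumptions; the
# real logic lives in the modeling code):
import numpy as np

def apply_time_mask(features, mask_time_prob=0.05, mask_time_length=10, min_masks=2, seed=0):
    # features: (num_frames, num_mel_bins) log-mel spectrogram, masked in place
    rng = np.random.default_rng(seed)
    num_frames = features.shape[0]
    num_masks = max(min_masks, int(mask_time_prob * num_frames / mask_time_length))
    for _ in range(num_masks):
        start = int(rng.integers(0, max(1, num_frames - mask_time_length)))
        features[start : start + mask_time_length] = 0.0
    return features

_masked = apply_time_mask(np.random.randn(1500, 80))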
| 683 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase__ ( a_, unittest.TestCase ):
'''simple docstring'''
_snake_case = TextToVideoSDPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_snake_case = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(_snake_case )
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
'''simple docstring'''
if str(_snake_case ).startswith('''mps''' ):
UpperCamelCase = torch.manual_seed(_snake_case )
else:
UpperCamelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = TextToVideoSDPipeline(**_snake_case )
UpperCamelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCamelCase = self.get_dummy_inputs(_snake_case )
UpperCamelCase = '''np'''
UpperCamelCase = sd_pipe(**_snake_case ).frames
UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
UpperCamelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case , expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
UpperCamelCase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCamelCase = pipe.to('''cuda''' )
UpperCamelCase = '''Spiderman is surfing'''
UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=2_5 , output_type='''pt''' ).frames
UpperCamelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
UpperCamelCase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
UpperCamelCase = pipe.to('''cuda''' )
UpperCamelCase = '''Spiderman is surfing'''
UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''pt''' ).frames
UpperCamelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 212 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
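# --- Illustrative sketch, not part of the extraction script ---
# The teacher layers [0, 2, 4, 7, 9, 11] selected above are renumbered 0..5 in
# the student. A stand-alone version of that key renaming (hypothetical helper):
def map_teacher_key_to_student(teacher_key, teacher_layers=(0, 2, 4, 7, 9, 11)):
    # e.g. "roberta.encoder.layer.7.output.dense.weight" -> student layer 3
    parts = teacher_key.split(".")
    pos = parts.index("layer") + 1
    parts[pos] = str(teacher_layers.index(int(parts[pos])))
    return ".".join(parts)

assert map_teacher_key_to_student("roberta.encoder.layer.7.output.dense.weight") == (
    "roberta.encoder.layer.3.output.dense.weight"
)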
| 683 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class a__( a_ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = 'convbert'
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-1_2 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=768 , __lowerCAmelCase=2 , __lowerCAmelCase=9 , __lowerCAmelCase=1 , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case , )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = embedding_size
lowerCAmelCase = head_ratio
lowerCAmelCase = conv_kernel_size
lowerCAmelCase = num_groups
lowerCAmelCase = classifier_dropout
class a__( a_ ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
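# --- Illustrative usage sketch, not part of the original file ---
# Instantiating the config and inspecting the ONNX input axes; the exact import
# paths for the public classes are assumptions here.
# from transformers import ConvBertConfig
# from transformers.models.convbert import ConvBertOnnxConfig
# onnx_config = ConvBertOnnxConfig(ConvBertConfig())
# print(onnx_config.inputs)  # OrderedDict of input_ids / attention_mask / token_type_ids axes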
| 370 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, batch index 1
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, batch index 1
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
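# --- Minimal stand-alone sketch, not the library implementation ---
# The core of the top-k warping exercised by the tests above, restated in
# plain NumPy:
import numpy as np

def top_k_filter(scores, k, filter_value=-np.inf):
    # keep the k largest logits per row, push everything else to filter_value
    kth_largest = np.sort(scores, axis=-1)[:, -k][:, None]
    return np.where(scores < kth_largest, filter_value, scores)

print(top_k_filter(np.array([[0.0, 1.0, 2.0, 3.0]]), k=2))  # [[-inf -inf 2. 3.]]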
| 683 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE__ ( a_ ):
snake_case__ : Dict = 'trocr'
snake_case__ : Any = ['past_key_values']
snake_case__ : Any = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : int=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : List[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_6 , SCREAMING_SNAKE_CASE__ : Tuple=4_0_9_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : str=2 , **SCREAMING_SNAKE_CASE__ : str , ) -> str:
a_ : Any = vocab_size
a_ : Any = d_model
a_ : List[Any] = decoder_layers
a_ : Tuple = decoder_attention_heads
a_ : Optional[int] = decoder_ffn_dim
a_ : Optional[Any] = activation_function
a_ : Optional[int] = max_position_embeddings
a_ : Dict = dropout
a_ : Optional[Any] = attention_dropout
a_ : Optional[Any] = activation_dropout
a_ : Optional[Any] = init_std
a_ : Any = decoder_layerdrop
a_ : List[str] = use_cache
a_ : Optional[Any] = scale_embedding
a_ : str = use_learned_position_embeddings
a_ : Tuple = layernorm_embedding
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , **_snake_case , )
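# --- Illustrative note, not part of the original file ---
# The attribute_map above lets generic code use the canonical names and reach
# the TrOCR-specific fields (behavior assumed from PretrainedConfig's
# attribute mapping):
# config = TrOCRConfig(d_model=512, decoder_layers=6)
# assert config.hidden_size == 512 and config.num_hidden_layers == 6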
| 570 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
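# --- Illustrative usage sketch, not part of the test module ---
# The inspection helpers exercised above, called directly:
# from datasets import get_dataset_config_names, get_dataset_split_names
# get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']
# get_dataset_config_names("paws")  # ['labeled_final', 'labeled_swap', 'unlabeled_final']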
| 683 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = nn.functional.normalize(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = nn.functional.normalize(SCREAMING_SNAKE_CASE )
return torch.mm(SCREAMING_SNAKE_CASE , normalized_text_embeds.t() )
class a_ ( a_ ):
A = CLIPConfig
A = ['CLIPEncoderLayer']
def __init__( self , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
SCREAMING_SNAKE_CASE_ = CLIPVisionModel(config.vision_config )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_snake_case )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_snake_case )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_snake_case )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(17 ) , requires_grad=_snake_case )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(3 ) , requires_grad=_snake_case )
@torch.no_grad()
def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.vision_model(_snake_case )[1] # pooled_output
SCREAMING_SNAKE_CASE_ = self.visual_projection(_snake_case )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = cosine_distance(_snake_case , self.special_care_embeds ).cpu().float().numpy()
SCREAMING_SNAKE_CASE_ = cosine_distance(_snake_case , self.concept_embeds ).cpu().float().numpy()
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = image_embeds.shape[0]
for i in range(_snake_case ):
SCREAMING_SNAKE_CASE_ = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
SCREAMING_SNAKE_CASE_ = special_cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_ = self.special_care_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
SCREAMING_SNAKE_CASE_ = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
SCREAMING_SNAKE_CASE_ = cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_ = self.concept_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_snake_case )
result.append(_snake_case )
SCREAMING_SNAKE_CASE_ = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.vision_model(_snake_case )[1] # pooled_output
SCREAMING_SNAKE_CASE_ = self.visual_projection(_snake_case )
SCREAMING_SNAKE_CASE_ = cosine_distance(_snake_case , self.special_care_embeds )
SCREAMING_SNAKE_CASE_ = cosine_distance(_snake_case , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_ = 0.0
SCREAMING_SNAKE_CASE_ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_ = torch.any(special_scores > 0 , dim=1 )
SCREAMING_SNAKE_CASE_ = special_care * 0.0_1
SCREAMING_SNAKE_CASE_ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
SCREAMING_SNAKE_CASE_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
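# --- Note / sketch, not part of the module ---
# Despite its name, cosine_distance above returns cosine *similarity* (matrix
# product of L2-normalized rows), so larger values mean closer embeddings:
# a, b = torch.randn(2, 8), torch.randn(3, 8)
# torch.allclose(cosine_distance(a, b),
#                nn.functional.cosine_similarity(a[:, None], b[None], dim=-1),
#                atol=1e-6)  # expected: True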
| 205 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
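# --- Sketch, not part of the tests ---
# The audio-length assertions above follow from the mel geometry: an image of
# width W frames reconstructs to roughly (W - 1) * hop_length audio samples.
# expected_samples = (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length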
| 683 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=[1, 1, 2] , lowerCamelCase__=1 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=8 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=5_1_2 , lowerCamelCase__=3 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , lowerCamelCase__=False , ):
UpperCAmelCase__: Tuple = parent
UpperCAmelCase__: int = batch_size
UpperCAmelCase__: Optional[Any] = seq_length
UpperCAmelCase__: Union[str, Any] = is_training
UpperCAmelCase__: List[str] = use_input_mask
UpperCAmelCase__: str = use_token_type_ids
UpperCAmelCase__: Union[str, Any] = use_labels
UpperCAmelCase__: Union[str, Any] = vocab_size
UpperCAmelCase__: Optional[int] = block_sizes
UpperCAmelCase__: Dict = num_decoder_layers
UpperCAmelCase__: List[Any] = d_model
UpperCAmelCase__: int = n_head
UpperCAmelCase__: Optional[Any] = d_head
UpperCAmelCase__: Optional[int] = d_inner
UpperCAmelCase__: int = hidden_act
UpperCAmelCase__: Dict = hidden_dropout
UpperCAmelCase__: int = attention_dropout
UpperCAmelCase__: Tuple = activation_dropout
UpperCAmelCase__: Any = max_position_embeddings
UpperCAmelCase__: str = type_vocab_size
UpperCAmelCase__: Optional[int] = 2
UpperCAmelCase__: List[Any] = num_labels
UpperCAmelCase__: Optional[Any] = num_choices
UpperCAmelCase__: Union[str, Any] = scope
UpperCAmelCase__: Tuple = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCAmelCase__: List[str] = n_head
# Used in the tests to check the size of the first hidden state
UpperCAmelCase__: Any = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCAmelCase__: List[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCAmelCase__: Optional[Any] = self.num_hidden_layers + 2
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__: Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__: Any = None
if self.use_token_type_ids:
UpperCAmelCase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__: int = None
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Optional[int] = None
if self.use_labels:
UpperCAmelCase__: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__: Optional[Any] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: int = TFFunnelModel(config=_snake_case )
UpperCAmelCase__: Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: Optional[Any] = model(_snake_case )
UpperCAmelCase__: List[str] = [input_ids, input_mask]
UpperCAmelCase__: Dict = model(_snake_case )
UpperCAmelCase__: str = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCAmelCase__: Dict = False
UpperCAmelCase__: Union[str, Any] = TFFunnelModel(config=_snake_case )
UpperCAmelCase__: Tuple = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCAmelCase__: Tuple = False
UpperCAmelCase__: Any = TFFunnelModel(config=_snake_case )
UpperCAmelCase__: Dict = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: Union[str, Any] = TFFunnelBaseModel(config=_snake_case )
UpperCAmelCase__: Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: int = model(_snake_case )
UpperCAmelCase__: Optional[int] = [input_ids, input_mask]
UpperCAmelCase__: List[Any] = model(_snake_case )
UpperCAmelCase__: Any = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
UpperCAmelCase__: int = False
UpperCAmelCase__: Dict = TFFunnelBaseModel(config=_snake_case )
UpperCAmelCase__: Tuple = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
UpperCAmelCase__: Optional[int] = False
UpperCAmelCase__: str = TFFunnelBaseModel(config=_snake_case )
UpperCAmelCase__: str = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: Union[str, Any] = TFFunnelForPreTraining(config=_snake_case )
UpperCAmelCase__: Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: List[Any] = TFFunnelForMaskedLM(config=_snake_case )
UpperCAmelCase__: Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: Optional[int] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: Optional[Any] = self.num_labels
UpperCAmelCase__: Dict = TFFunnelForSequenceClassification(config=_snake_case )
UpperCAmelCase__: Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: Union[str, Any] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: Dict = self.num_choices
UpperCAmelCase__: Tuple = TFFunnelForMultipleChoice(config=_snake_case )
UpperCAmelCase__: Dict = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__: List[Any] = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__: Any = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__: Optional[int] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase__: Dict = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: Tuple = self.num_labels
UpperCAmelCase__: Any = TFFunnelForTokenClassification(config=_snake_case )
UpperCAmelCase__: Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: int = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
UpperCAmelCase__: Optional[Any] = TFFunnelForQuestionAnswering(config=_snake_case )
UpperCAmelCase__: Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__: Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Dict = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
UpperCAmelCase__: List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
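# --- Note, not part of the original tester ---
# With block_sizes=[1, 1, 2] and num_decoder_layers=1, the count computed in
# __init__ is sum(block_sizes) + num_decoder_layers = 5 hidden layers, plus 2
# more for the full (non-base) model: the input embeddings and the upsampled
# encoder state fed to the decoder, i.e. 7 hidden states in total.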
@require_tf
class __UpperCamelCase ( a_ ,a_ ,unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = TFFunnelModelTester(self )
UpperCAmelCase__: Any = ConfigTester(self , config_class=_snake_case )
def _UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
UpperCAmelCase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@require_tf
class __UpperCamelCase ( a_ ,unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__magic_name__ = False
__magic_name__ = False
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[str] = TFFunnelModelTester(self , base=_snake_case )
UpperCAmelCase__: str = ConfigTester(self , config_class=_snake_case )
def _UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_snake_case )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case )
| 113 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 315 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
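    # The fast (Rust) backend serializes its own normalizer; __init__ re-syncs it whenever the Python-side options (lowercasing, accent stripping, Chinese-char handling) disagree.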
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 |
'''simple docstring'''
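# Odd-even transposition ("brick") sort: alternate compare-swap passes over even- and odd-indexed pairs until one full double pass makes no swaps.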
def odd_even_sort( input_list ) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 ,len(input_list ) - 1 ,2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 ,len(input_list ) - 1 ,2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("""The sorted list is""")
    print(sorted_list)
| 683 | 0 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1_024,
}
def load_vocab( vocab_file ) -> collections.OrderedDict:
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer ( object ):
"""simple docstring"""
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_0_0 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            # Shrink the window from the right until a vocab entry is found.
            while start < end:
                substr = ''''''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[''' '''] = self.encoder[space_token]
        self.encoder['''\n'''] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['''</_>'''] = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['''</n>'''] = self.encoder['''\n''']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 399 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
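# Remaps LDM VAE keys (down.N.block, mid.block_N, up.N.block, attn_1) onto the diffusers layout (down_blocks.N.resnets, mid_block.resnets, up_blocks.N.resnets, mid_block.attentions.0).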
def custom_convert_ldm_vae_checkpoint( checkpoint ,config ) -> dict:
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
        if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
            new_checkpoint[f'''encoder.down_blocks.{i}.downsamplers.0.conv.weight'''] = vae_state_dict.pop(
                f'''encoder.down.{i}.downsample.conv.weight''' )
            new_checkpoint[f'''encoder.down_blocks.{i}.downsamplers.0.conv.bias'''] = vae_state_dict.pop(
                f'''encoder.down.{i}.downsample.conv.bias''' )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths ,new_checkpoint ,vae_state_dict ,additional_replacements=[meta_path] ,config=config )
    mid_resnets = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1 ,num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths ,new_checkpoint ,vae_state_dict ,additional_replacements=[meta_path] ,config=config )
    mid_attentions = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths ,new_checkpoint ,vae_state_dict ,additional_replacements=[meta_path] ,config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
        ]
        if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
            new_checkpoint[f'''decoder.up_blocks.{i}.upsamplers.0.conv.weight'''] = vae_state_dict[
                f'''decoder.up.{block_id}.upsample.conv.weight'''
            ]
            new_checkpoint[f'''decoder.up_blocks.{i}.upsamplers.0.conv.bias'''] = vae_state_dict[
                f'''decoder.up.{block_id}.upsample.conv.bias'''
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths ,new_checkpoint ,vae_state_dict ,additional_replacements=[meta_path] ,config=config )
    mid_resnets = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1 ,num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths ,new_checkpoint ,vae_state_dict ,additional_replacements=[meta_path] ,config=config )
    mid_attentions = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths ,new_checkpoint ,vae_state_dict ,additional_replacements=[meta_path] ,config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path ,output_path ,) -> None:
    # Only support V1
    r = requests.get(
        ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 5_12
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('''safetensors''' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path ,framework='''pt''' ,device='''cpu''' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path ,map_location=device )['''state_dict''']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config ,image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint ,vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 683 | 0 |
'''simple docstring'''
_lowerCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 565 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 683 | 0 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
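    # The full-loop tests below all follow the same sampling recipe: scale the sample by the current sigma, predict with the model, step the scheduler, then compare summary statistics against reference values.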
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1E-2
        assert abs(result_mean.item() - 0.0131 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0002 ) < 1E-2
        assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
    def test_full_loop_device( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1E-2
        assert abs(result_mean.item() - 0.0131 ) < 1E-3
    def test_full_loop_device_karras_sigmas( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 644 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR ,IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) ,4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths ,annos ,idxs ,OUTPUT_SIZE ,SCALE_RANGE ,filter_scale=FILTER_TINY_SCALE ,)
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cv2.imwrite(f'''{file_root}.jpg''' ,new_image ,[cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
        with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir ,img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir ,'''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir ,f'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
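# Builds one mosaic: four source images are resized into the quadrants around a random split point (divid_point_x, divid_point_y) and their boxes are rescaled into the mosaic's normalized coordinates.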
def update_image_and_anno( all_img_list ,all_annos ,idxs ,output_size ,scale_range ,filter_scale = 0.0 ,) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img ,(divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img ,(output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img ,(divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars( number_char ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint : dict , config : dict) -> dict:
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'''.'''.join(layer.split('''.''')[:3]) for layer in vae_state_dict if '''encoder.down''' in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'''.'''.join(layer.split('''.''')[:3]) for layer in vae_state_dict if '''decoder.up''' in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
        if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
            new_checkpoint[F'''encoder.down_blocks.{i}.downsamplers.0.conv.weight'''] = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.weight''')
            new_checkpoint[F'''encoder.down_blocks.{i}.downsamplers.0.conv.bias'''] = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.bias''')
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''down.{i}.block''', '''new''': F'''down_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config)
    mid_resnets = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''mid.block_{i}''', '''new''': F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config)
    mid_attentions = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
        ]
        if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
            new_checkpoint[F'''decoder.up_blocks.{i}.upsamplers.0.conv.weight'''] = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.weight'''
            ]
            new_checkpoint[F'''decoder.up_blocks.{i}.upsamplers.0.conv.bias'''] = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.bias'''
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''up.{block_id}.block''', '''new''': F'''up_blocks.{i}.resnets'''}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config)
    mid_resnets = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': F'''mid.block_{i}''', '''new''': F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config)
    mid_attentions = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path : str , output_path : str , ) -> None:
    # Only support V1
    r = requests.get(
        ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''')
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('''safetensors'''):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework='''pt''' , device='''cpu''') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device)['''state_dict''']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 274 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ) -> None:
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=128 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        train_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        val_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='''steps''' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 683 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class lowercase__ :
'''simple docstring'''
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    task_type: Optional[str] = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class lowercase__ :
'''simple docstring'''
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    labels: Optional[str] = field(
        default=None, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    max_seq_length: int = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''')
    module = import_module('''tasks''')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', _UpperCAmelCase)
# Set seed
set_seed(training_args.seed)
# Prepare CONLL-2003 task
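    # The label list drives everything downstream: the id<->label maps go into the config, the datasets, and metric alignment.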
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                # Positions whose gold label is the ignore index (-100) were padding/subword pieces; skip them.
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        if trainer.is_world_process_zero():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))
        results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, '''test_results.txt''')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, '''w''') as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, '''test_predictions.txt''')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, '''w''') as writer:
                with open(os.path.join(data_args.data_dir, '''test.txt'''), '''r''') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def __snake_case ( _UpperCAmelCase : int):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 212 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ) -> Optional[int]:
    if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )
    parser.add_argument(
        '''--config_file''' ,default=None ,help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ) -> Any:
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
        '''PyTorch XPU available''': str(pt_xpu_available ),
        '''PyTorch NPU available''': str(pt_npu_available ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    accelerate_config_str = (
        '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config ,dict )
        else f'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info['''`Accelerate` configs'''] = accelerate_config
return info
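# A minimal programmatic usage sketch (the config path is hypothetical):
#     parser = env_command_parser()
#     args = parser.parse_args(["--config_file", "my_accelerate_config.yaml"])
#     env_command(args)  # prints the report and returns the info dict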
def main( ) -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
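    # The `_LazyModule` proxy registered in `sys.modules` resolves names from
    # `_import_structure` on first attribute access, so importing this module stays
    # cheap: the torch-backed classes above are only loaded when actually used.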
| 370 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def get_config( model_name ) -> Tuple:
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer ,num_labels=10_00 ,idalabel=idalabel ,labelaid=labelaid ,)
    return config
return config
def rename_key( name ) -> str:
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' ,'''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' ,'''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
return name
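# Illustration of the renaming above on a few timm-style keys (example keys, not
# taken from a real checkpoint):
#     "stem.conv.weight"        -> "bit.embedder.convolution.weight"
#     "blocks.0.0.conv1.weight" -> "bit.encoder.layers.0.0.conv1.weight"
#     "head.fc.weight"          -> "classifier.1.weight"
#     "norm.weight"             -> "bit.norm.weight"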
def prepare_img( ) -> Optional[int]:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name ,pytorch_dump_folder_path ,push_to_hub=False ) -> List[Any]:
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name ,pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} ,model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=True ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=True ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image ,return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values ,pixel_values )
# verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''' ,logits[0, :3] )
    print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(timm_pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE__ ( a_ , a_ ):
@register_to_config
    def __init__( self , input_dims = 1_2_8 , targets_length = 2_5_6 , max_decoder_noise_time = 2_0_0_0.0 , d_model = 7_6_8 , num_layers = 1_2 , num_heads = 1_2 , d_kv = 6_4 , d_ff = 2_0_4_8 , dropout_rate = 0.1 , ) -> Tuple:
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ) -> Union[str, Any]:
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
    def SCREAMING_SNAKE_CASE ( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ) -> str:
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
return spec_out
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> str:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def SCREAMING_SNAKE_CASE ( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ) -> Optional[Any]:
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_decoder_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_decoder_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
return (hidden_states,)
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> List[Any]:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def SCREAMING_SNAKE_CASE ( self , hidden_states , conditioning_emb=None , attention_mask=None , ) -> List[str]:
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> List[str]:
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def SCREAMING_SNAKE_CASE ( self , hidden_states , key_value_states=None , attention_mask=None , ) -> List[Any]:
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
return layer_output
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> Any:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def SCREAMING_SNAKE_CASE ( self , hidden_states , conditioning_emb=None ) -> Any:
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ) -> Dict:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def SCREAMING_SNAKE_CASE ( self , hidden_states ) -> Dict:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ) -> Optional[Any]:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def SCREAMING_SNAKE_CASE ( self , hidden_states ) -> int:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        variance = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.floataa, torch.bfloataa]:
            hidden_states = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def SCREAMING_SNAKE_CASE ( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(input , 3.0 )) ))
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__( self , in_features , out_features ) -> str:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def SCREAMING_SNAKE_CASE ( self , x , conditioning_emb ) -> Optional[int]:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
return x
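# The layer above is a FiLM (Feature-wise Linear Modulation) layer: one linear
# projection of the conditioning embedding is chunked into per-channel `scale` and
# `shift`, applied as x * (1 + scale) + shift. A tiny shape sketch (dimensions are
# illustrative only):
#     film = TaFiLMLayer(in_features=4 * 64, out_features=64)
#     x = torch.randn(2, 10, 64)        # (batch, seq, channels)
#     cond = torch.randn(2, 1, 4 * 64)  # conditioning embedding
#     y = film(x, cond)                 # same shape as x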
| 570 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number( number ) -> int:
    sum_of_digits_squared = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[57] = True
CHAINS[0] = False
def chain( number ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
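# Memoization note: once the terminal value of a chain is known for `number`, the
# while-loop above also caches it for number * 10, number * 100, ... because
# appending zeros does not change the digit-square sum.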
def solution( number = 10_00_00_00 ) -> int:
    for i in range(1 ,number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(True )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( a_ , unittest.TestCase ):
A = XGLMTokenizer
A = XGLMTokenizerFast
A = True
A = True
def A_( self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(_snake_case , keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def A_( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<pad>'''
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def A_( self ) -> Tuple:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(_snake_case ) , 1008 )
def A_( self ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def A_( self ) -> Optional[int]:
"""simple docstring"""
        tokenizer = XGLMTokenizer(_snake_case , keep_accents=_snake_case )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
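        # `fairseq_offset` shifts raw SentencePiece ids upward to leave room for the
        # fairseq-style special tokens (<s>, <pad>, </s>, <unk>) at the low ids.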
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A_( self ) -> Optional[Any]:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def A_( self ) -> str:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_snake_case , f.name )
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(f.name , keep_accents=_snake_case )
SCREAMING_SNAKE_CASE_ = pickle.dumps(_snake_case )
pickle.loads(_snake_case )
def A_( self ) -> Optional[int]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_snake_case )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
        rust_tokenizer = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_snake_case )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
@slow
def A_( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''Hello World!'''
SCREAMING_SNAKE_CASE_ = [2, 3_1227, 4447, 35]
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case ) )
@slow
def A_( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
SCREAMING_SNAKE_CASE_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case ) )
@slow
def A_( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='facebook/xglm-564M' , padding=_snake_case , )
| 205 |
'''simple docstring'''
_UpperCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> str:
assert len(str(UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_UpperCamelCase : Any = year // 1_00
_UpperCamelCase : List[Any] = (5 * (century % 4) + 2) % 7
_UpperCamelCase : Tuple = year % 1_00
_UpperCamelCase : Optional[int] = centurian % 12
_UpperCamelCase : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCamelCase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCamelCase : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
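# Worked example for the function above with (year, month, day) = (2020, 10, 24):
#   century = 20 -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 20, centurian_m = 8 -> doomsday weekday = (1 + 8 + 2 + 2) % 7 = 6
#   2020 is a leap year, so the month anchor is DOOMSDAY_LEAP[9] = 3
#   (6 + 24 - 3) % 7 = 6 -> WEEK_DAY_NAMES[6] == "Saturday"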
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __UpperCamelCase ( a_ ):
'''simple docstring'''
__magic_name__ = 'Wav2Vec2FeatureExtractor'
__magic_name__ = 'AutoTokenizer'
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
super().__init__(_snake_case , _snake_case )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
def _UpperCAmelCase ( cls , lowerCamelCase__ , **lowerCamelCase__ ):
try:
return super().from_pretrained(_snake_case , **_snake_case )
except OSError:
warnings.warn(
F"Loading a tokenizer inside {cls.__name__} from a config that does not"
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , _snake_case , )
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(_snake_case , **_snake_case )
        tokenizer = WavaVecaCTCTokenizer.from_pretrained(_snake_case , **_snake_case )
        return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_snake_case , **_snake_case )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
UpperCAmelCase__: Any = kwargs.pop("raw_speech" )
else:
UpperCAmelCase__: Union[str, Any] = kwargs.pop("audio" , _snake_case )
UpperCAmelCase__: Optional[int] = kwargs.pop("sampling_rate" , _snake_case )
UpperCAmelCase__: Optional[Any] = kwargs.pop("text" , _snake_case )
if len(_snake_case ) > 0:
UpperCAmelCase__: Any = args[0]
UpperCAmelCase__: Dict = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
return inputs
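    # Typical usage sketch (model id and inputs are illustrative):
    #     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    #     inputs = processor(audio=raw_speech, sampling_rate=16_000, text="transcript")
    # When both audio and text are given, the tokenized ids are attached to the
    # feature dict as inputs["labels"], as done above.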
def _UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_snake_case , **_snake_case )
UpperCAmelCase__: str = kwargs.pop("input_features" , _snake_case )
UpperCAmelCase__: Optional[int] = kwargs.pop("labels" , _snake_case )
if len(_snake_case ) > 0:
UpperCAmelCase__: List[str] = args[0]
UpperCAmelCase__: Optional[int] = args[1:]
if input_features is not None:
UpperCAmelCase__: List[Any] = self.feature_extractor.pad(_snake_case , *_snake_case , **_snake_case )
if labels is not None:
UpperCAmelCase__: Any = self.tokenizer.pad(_snake_case , **_snake_case )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase__: str = labels['''input_ids''']
return input_features
def _UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.decode(*_snake_case , **_snake_case )
@contextmanager
def _UpperCAmelCase ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 113 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
        vqa_pipeline = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        examples = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
        vqa_pipeline = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        question = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
        vqa_pipeline = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        question = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
| 683 | 0 |
import functools
def _a ( UpperCAmelCase , UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[str] = len(UpperCAmelCase )
lowerCamelCase__ : str = len(UpperCAmelCase )
@functools.cache
def min_distance(UpperCAmelCase , UpperCAmelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
lowerCamelCase__ : Tuple = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , UpperCAmelCase ) , 1 + min_distance(UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
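# Intended behavior (classic example): the edit distance between "kitten" and
# "sitting" is 3 (substitute k->s, substitute e->i, insert g). @functools.cache
# memoizes the recursion on index pairs, giving O(len(word1) * len(word2)) time.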
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
import math
def is_prime( number ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( ratio = 0.1 ) -> int:
    j = 3
    primes = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
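# Context: this appears to be the Ulam-spiral diagonal-ratio problem (Project
# Euler 58). A spiral of side length j has 2 * j - 1 numbers on its diagonals; the
# inner loop visits the three non-square corners of the next ring, j*j + k*(j + 1)
# for k = 1..3 (the fourth corner (j + 2)**2 is excluded by the range bound and is
# never prime anyway). The loop ends once primes / diagonals falls below `ratio`.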
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params ,i ,prefix ) -> int:
    return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup( params ,i ,prefix ,layer_name='''attention''' ) -> List[str]:
    k_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params ,i ,prefix ,split_mlp_wi=False ) -> List[str]:
    if split_mlp_wi:
        wi_0 = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params ,i ,prefix ,layer_name ) -> Dict:
    return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
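# Note on the MLP lookup above: T5 v1.1 ("gated GeLU") checkpoints store two input
# projections, wi_0 and wi_1, whose outputs are multiplied (GeGLU), while v1.0
# checkpoints have a single wi; `split_mlp_wi` selects the right layout.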
def convert_tax_to_pytorch( variables ,*, num_layers ,is_encoder_only ,scalable_attention = False ) -> int:
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' ,split_mlp_wi )
    new = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_attention_layer_norm''' )
        k , o , q , v = tax_attention_lookup(old ,i ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_mlp_layer_norm''' )
        wi , wo = tax_mlp_lookup(old ,i ,'''encoder''' ,split_mlp_wi )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_mlp_layer_norm''' )
            wi , wo = tax_mlp_lookup(old ,i ,'''decoder''' ,split_mlp_wi )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict( converted_params ,is_encoder_only ) -> Optional[int]:
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_tax_weights_in_ta( model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention ) -> Any:
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables ,num_layers=config.num_layers ,is_encoder_only=is_encoder_only ,scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted ,is_encoder_only )
    model.load_state_dict(state_dict ,strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path ,config_file ,pytorch_dump_path ,is_encoder_only = False ,scalable_attention = False ,) -> int:
    config = MTaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
# Load weights from tf checkpoint
    load_tax_weights_in_ta(model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
# Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 683 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
a__ : List[Any] =data_utils.TransfoXLTokenizer
a__ : int =data_utils.TransfoXLCorpus
a__ : Tuple =data_utils
a__ : List[str] =data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path ,transfo_xl_config_file ,pytorch_dump_folder_path ,transfo_xl_dataset_file ) -> List[str]:
"""simple docstring"""
if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , 'rb' ) as fp:
            corpus = pickle.load(fp , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'''Building PyTorch model from configuration: {config}''' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
a__ : List[str] =argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
a__ : List[str] =parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 399 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def partition( number_to_partition ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
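# Encoding trick: each partition of n into primes is represented by the *product*
# of its parts. By unique factorization two different prime multisets always yield
# different products, so len(partition(n)) counts the distinct prime partitions.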
def solution( number_unique_partitions = 50_00 ) -> int | None:
    for number_to_partition in range(1 ,NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
    return len(frequency ) == 26
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
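# Quick sanity check for all three variants:
#     is_pangram()              -> True  (the default sentence is a pangram)
#     is_pangram("hello world") -> False (most letters never occur)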
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
from timeit import timeit
lowerCAmelCase__ : Union[str, Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit("""is_pangram()""" , setup=UpperCamelCase ) )
print(timeit("""is_pangram_faster()""" , setup=UpperCamelCase ) )
print(timeit("""is_pangram_fastest()""" , setup=UpperCamelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 565 |
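# A quick cross-check of the three pangram variants above: all three must
# agree on any input.
for text in ("The quick brown fox jumps over the lazy dog", "hello world"):
    assert is_pangram(text) == is_pangram_faster(text) == is_pangram_fastest(text)
    print(text, "->", is_pangram_fastest(text))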
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 0 |
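# A minimal, self-contained sketch of the dense-retrieval step the app above
# builds on: max-inner-product search in a flat FAISS index (faiss assumed
# installed). The 128-dim random vectors stand in for the RetriBERT passage
# embeddings memory-mapped in load_indexes().
import faiss
import numpy as np
dim = 128
passage_reps = np.random.rand(1000, dim).astype("float32")
index = faiss.IndexFlatIP(dim)  # inner-product index, as in the app
index.add(passage_reps)
query_rep = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(query_rep, 10)  # top-10 passages for the query
print(ids[0], scores[0])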
"""simple docstring"""
def valid_connection(graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int] ) -> bool:
    """simple docstring"""
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle(graph: list[list[int]] , path: list[int] , curr_ind: int ) -> bool:
    """simple docstring"""
    # Base Case: the whole path has been built, try to close the cycle
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]] , start_index: int = 0 ) -> list[int]:
    """simple docstring"""
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find an answer return the path, otherwise return an empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
| 644 |
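# A small end-to-end check of the backtracking search above on a 5-vertex
# graph given as an adjacency matrix; [0, 1, 2, 4, 3, 0] is one valid
# closed tour it finds.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # -> [0, 1, 2, 4, 3, 0]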
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
_UpperCamelCase : Node | None = None # Added in order to delete a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
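# A brief usage sketch of the k-th smallest helper above. The method names
# insert(*values) and kth_smallest(k, node) follow the reference
# implementation this tree mirrors and are assumptions here.
t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
# In-order traversal of a BST visits values in sorted order, so the 3rd
# smallest of {1, 3, 4, 6, 7, 8, 10, 13, 14} is 4.
print(t.kth_smallest(3, t.root))  # -> 4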
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : str ={
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple =[
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 274 |
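# A simplified sketch of the deferred-import pattern behind _LazyModule:
# attributes resolve to their real modules only on first access. The real
# transformers implementation also installs itself in sys.modules; this
# stand-in only shows the lazy __getattr__ mechanics.
import importlib
from types import ModuleType
class LazyModule(ModuleType):
    def __init__(self, name: str, attr_to_module: dict[str, str]):
        super().__init__(name)
        self._attr_to_module = attr_to_module
    def __getattr__(self, attr: str):
        # Import the owning module lazily, then cache the attribute so
        # __getattr__ is not hit again for the same name.
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value
lazy = LazyModule("lazy_demo", {"sqrt": "math"})
print(lazy.sqrt(2.0))  # math is imported only at this point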
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'whisper'
A__ : Tuple = ['past_key_values']
A__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _snake_case=51865 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=1536 , _snake_case=1536 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=50257 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=256 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=1500 , _snake_case=448 , _snake_case=50256 , _snake_case=50256 , _snake_case=50256 , _snake_case=None , _snake_case=[220, 50256] , _snake_case=False , _snake_case=256 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ) -> Any:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = num_mel_bins
_UpperCamelCase : List[str] = d_model
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Optional[int] = encoder_attention_heads
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[int] = encoder_ffn_dim
_UpperCamelCase : Any = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : List[Any] = init_std
_UpperCamelCase : Optional[int] = encoder_layerdrop
_UpperCamelCase : str = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : List[str] = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : int = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : int = mask_time_length
_UpperCamelCase : List[Any] = mask_time_min_masks
_UpperCamelCase : List[str] = mask_feature_prob
_UpperCamelCase : Optional[int] = mask_feature_length
_UpperCamelCase : Union[str, Any] = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Dict = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase : Tuple = {0: '''batch'''}
else:
_UpperCamelCase : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
return common_inputs
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 22050 , _snake_case = 5.0 , _snake_case = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = OrderedDict()
_UpperCamelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
_UpperCamelCase : int = encoder_inputs['''input_features'''].shape[2]
_UpperCamelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : str = super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = encoder_inputs.pop('''input_features''' )
_UpperCamelCase : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : List[str] = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowercase ( self ) -> float:
return 1E-3
| 683 | 0 |
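# A short sketch of the attribute map above in action: `hidden_size` and
# `num_attention_heads` are read-through aliases for `d_model` and
# `encoder_attention_heads` (transformers assumed installed).
from transformers import WhisperConfig
config = WhisperConfig(d_model=512, encoder_attention_heads=8)
print(config.hidden_size)          # 512, resolved via the d_model alias
print(config.num_attention_heads)  # 8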
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( a_ ):
'''simple docstring'''
_snake_case = ['input_features']
def __init__( self , lowerCamelCase__=8_0 , lowerCamelCase__=1_6_0_0_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=3_0 , lowerCamelCase__=4_0_0 , lowerCamelCase__=0.0 , lowerCamelCase__=False , **lowerCamelCase__ , ):
'''simple docstring'''
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
UpperCamelCase = n_fft
UpperCamelCase = hop_length
UpperCamelCase = chunk_length
UpperCamelCase = chunk_length * sampling_rate
UpperCamelCase = self.n_samples // hop_length
UpperCamelCase = sampling_rate
UpperCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_snake_case , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=_snake_case , norm='''slaney''' , mel_scale='''slaney''' , )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = spectrogram(
_snake_case , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase = log_spec[:, :-1]
UpperCamelCase = np.maximum(_snake_case , log_spec.max() - 8.0 )
UpperCamelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.0 ):
'''simple docstring'''
if attention_mask is not None:
UpperCamelCase = np.array(_snake_case , np.intaa )
UpperCamelCase = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
UpperCamelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase = padding_value
normed_input_values.append(_snake_case )
else:
UpperCamelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "max_length" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
UpperCamelCase = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
UpperCamelCase = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase = [np.asarray([raw_speech] ).T]
UpperCamelCase = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase = self.pad(
_snake_case , padding=_snake_case , max_length=max_length if max_length else self.n_samples , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase = [self._np_extract_fbank_features(_snake_case ) for waveform in input_features[0]]
if isinstance(input_features[0] , _snake_case ):
UpperCamelCase = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 212 |
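# A numpy-only sketch of the log-mel post-processing applied above: log10 of
# the power spectrogram, clamp to an 80 dB dynamic range below the peak,
# then rescale roughly into [-1, 1]. Shapes are illustrative.
import numpy as np
def compress_log_spec(mel_power: np.ndarray) -> np.ndarray:
    log_spec = np.log10(np.clip(mel_power, 1e-10, None))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # 8 log10 units = 80 dB
    return (log_spec + 4.0) / 4.0
print(compress_log_spec(np.random.rand(80, 3000)).shape)  # (80, 3000)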
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 683 | 0 |
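# A toy sketch of the layer-subsampling idea above: copy every selected
# teacher layer into a consecutively numbered student layer. The key names
# are illustrative, not a real model's state dict.
teacher_sd = {f"encoder.layer.{i}.weight": i for i in range(12)}
student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    student_sd[f"encoder.layer.{std_idx}.weight"] = teacher_sd[
        f"encoder.layer.{teacher_idx}.weight"
    ]
print(student_sd)  # 6 student layers mapped from 12 teacher layers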
'''simple docstring'''
def move_tower(height: int , from_pole: str , to_pole: str , with_pole: str ) -> None:
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk(fp: str , tp: str ) -> None:
    '''simple docstring'''
    print("""moving disk from""" , fp , """to""" , tp )
def main() -> None:
    '''simple docstring'''
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
| 370 |
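# Moving a 3-disk tower takes 2**3 - 1 = 7 moves; a quick check of the
# recursion above without going through main()'s input() prompt.
move_tower(3, """A""", """B""", """C""")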
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 683 | 0 |
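# A numpy sketch of the top-k warping the tests above exercise: keep the k
# largest logits and push everything else to -inf before softmax/sampling.
import numpy as np
def top_k_filter(logits: np.ndarray, k: int) -> np.ndarray:
    kth_largest = np.sort(logits, axis=-1)[..., -k, None]
    return np.where(logits < kth_largest, -np.inf, logits)
scores = np.array([[0.1, 0.4, 0.2, 0.9, 0.3]])
print(top_k_filter(scores, 2))  # only 0.9 and 0.4 survive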
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=7 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_8 , SCREAMING_SNAKE_CASE__ : int=3_0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4_0_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Tuple=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Any=[0.5, 0.5, 0.5] , ) -> Tuple:
a_ : List[str] = parent
a_ : Union[str, Any] = batch_size
a_ : Optional[int] = num_channels
a_ : int = image_size
a_ : int = min_resolution
a_ : Optional[Any] = max_resolution
a_ : Any = do_resize
a_ : int = size if size is not None else {'''height''': 1_8, '''width''': 2_0}
a_ : int = do_thumbnail
a_ : Optional[Any] = do_align_axis
a_ : Tuple = do_pad
a_ : Dict = do_normalize
a_ : int = image_mean
a_ : Optional[int] = image_std
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( a_ , unittest.TestCase ):
snake_case__ : str = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ : Any = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
a_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
self.assertTrue(hasattr(_snake_case , 'do_thumbnail' ) )
self.assertTrue(hasattr(_snake_case , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_snake_case , 'do_pad' ) )
self.assertTrue(hasattr(_snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case , 'image_mean' ) )
self.assertTrue(hasattr(_snake_case , 'image_std' ) )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
a_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 2_0} )
a_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
# Previous config had dimensions in (width, height) order
a_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) )
self.assertEqual(image_processor.size , {'height': 8_4, 'width': 4_2} )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
pass
@is_flaky()
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
# Initialize image_processing
a_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
a_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ : Tuple = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
# Initialize image_processing
a_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
a_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ : Union[str, Any] = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
# Initialize image_processing
a_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
a_ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ : Any = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 570 |
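A minimal, self-contained sketch of the size convention the test above asserts: plain ints become square sizes, and legacy (width, height) tuples are flipped into an explicit dict. The helper name normalize_size is illustrative, not the library's actual implementation.

def normalize_size(size):
    # Ints become square sizes; legacy (width, height) tuples are flipped
    # into an explicit {"height", "width"} dict, matching the test above.
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)) and len(size) == 2:
        width, height = size  # previous configs stored (width, height)
        return {"height": height, "width": width}
    return dict(size)

assert normalize_size(42) == {"height": 42, "width": 42}
assert normalize_size((42, 84)) == {"height": 84, "width": 42}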
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
| 683 | 0 |
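For reference, a hedged usage sketch of the inspection API the tests above exercise. It assumes network access to the Hugging Face Hub; "squad" is one of the dataset paths parametrized above, and the printed values are examples, not guarantees.

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")  # e.g. ["plain_text"]
for config in configs:
    splits = get_dataset_split_names("squad", config_name=config)
    print(config, splits)  # e.g. plain_text ['train', 'validation']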
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_logger(__name__)
class a_ ( enum.Enum ):
A = 'all_checks'
A = 'basic_checks'
A = 'no_checks'
class a_ ( a_ ):
pass
class a_ ( a_ ):
pass
class a_ ( a_ ):
pass
class a_ ( a_ ):
pass
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Dict:
'''simple docstring'''
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
SCREAMING_SNAKE_CASE_ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE_ = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(SCREAMING_SNAKE_CASE ) > 0:
raise NonMatchingChecksumError(
F'Checksums didn\'t match{for_verification_name}:\n'
F'{bad_urls}\n'
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class a_ ( a_ ):
pass
class a_ ( a_ ):
pass
class a_ ( a_ ):
pass
class a_ ( a_ ):
pass
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
SCREAMING_SNAKE_CASE_ = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) )
logger.info('All the splits matched successfully.' )
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True ) -> dict:
'''simple docstring'''
if record_checksum:
SCREAMING_SNAKE_CASE_ = shaaaa()
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
m.update(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = m.hexdigest()
else:
SCREAMING_SNAKE_CASE_ = None
return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum}
def lowercase ( SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 205 |
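Reading the obfuscated shaaaa above as hashlib.sha256, here is a standalone sketch of the same verification pattern: compare key sets first, then flag any checksum mismatch, and stream files in 1 MiB chunks when hashing. Plain ValueError stands in for the module's exception hierarchy.

import hashlib

def verify_checksums(expected, recorded):
    # Compare key sets first, then flag any URL whose checksum differs.
    if set(expected) - set(recorded):
        raise ValueError(f"expected more files: {sorted(set(expected) - set(recorded))}")
    if set(recorded) - set(expected):
        raise ValueError(f"unexpected files: {sorted(set(recorded) - set(expected))}")
    bad_urls = [url for url in expected if expected[url] != recorded[url]]
    if bad_urls:
        raise ValueError(f"checksums didn't match: {bad_urls}")

def file_checksum(path):
    # Stream the file in 1 MiB chunks, as the hashing loop above intends.
    m = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            m.update(chunk)
    return m.hexdigest()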
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 683 | 0 |
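The tests above depend on seeded torch.Generator determinism: two runs seeded identically must produce byte-identical tensors, which is what makes exact slice comparisons possible. A minimal sketch of that idiom, independent of any pipeline:

import torch

g1 = torch.Generator(device="cpu").manual_seed(42)
g2 = torch.Generator(device="cpu").manual_seed(42)
a = torch.randn(4, generator=g1)
b = torch.randn(4, generator=g2)
assert torch.equal(a, b)  # identical seeds -> identical samples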
from collections.abc import Iterable
from typing import Any
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ = None ):
UpperCAmelCase__: int = value
UpperCAmelCase__: Node | None = None # Added in order to delete a node easier
UpperCAmelCase__: Node | None = None
UpperCAmelCase__: Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"{self.value}": (self.left, self.right)} , indent=1 )
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ = None ):
UpperCAmelCase__: str = root
def __str__( self ):
return str(self.root )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
if new_children is not None: # reset its kids
UpperCAmelCase__: Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
UpperCAmelCase__: str = new_children
else:
UpperCAmelCase__: Any = new_children
else:
UpperCAmelCase__: Any = new_children
def _UpperCAmelCase ( self , lowerCamelCase__ ):
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _UpperCAmelCase ( self ):
return self.root is None
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase__: Optional[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase__: int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase__: Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase__: Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase__: Any = new_node
break
else:
UpperCAmelCase__: str = parent_node.right
UpperCAmelCase__: Any = parent_node
def _UpperCAmelCase ( self , *lowerCamelCase__ ):
for value in values:
self.__insert(_snake_case )
def _UpperCAmelCase ( self , lowerCamelCase__ ):
if self.empty():
            raise IndexError("Warning: Tree is empty! Please insert values before searching." )
else:
UpperCAmelCase__: List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase__: Optional[Any] = node.left if value < node.value else node.right
return node
def _UpperCAmelCase ( self , lowerCamelCase__ = None ):
if node is None:
if self.root is None:
return None
UpperCAmelCase__: Dict = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase__: Tuple = node.right
return node
def _UpperCAmelCase ( self , lowerCamelCase__ = None ):
if node is None:
UpperCAmelCase__: Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase__: Optional[int] = self.root
while node.left is not None:
UpperCAmelCase__: List[str] = node.left
return node
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__: str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
UpperCAmelCase__: List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase__: int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _UpperCAmelCase ( self , lowerCamelCase__ ):
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _UpperCAmelCase ( self , lowerCamelCase__=None ):
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: int = []
if curr_node is not None:
UpperCAmelCase__: Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _A ( ):
UpperCAmelCase__: Any = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7)
UpperCAmelCase__: Tuple = BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE )
    if t.search(6 ) is not None:
        print("The value 6 exists" )
    else:
        print("The value 6 doesn't exist" )
    if t.search(-1 ) is not None:
        print("The value -1 exists" )
    else:
        print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " ,t.get_max().value ) # type: ignore
print("Min Value: " ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
| 113 |
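For contrast with the iterative insert and search above, a minimal recursive binary-search-tree sketch. This is an independent illustration, not the class defined above.

class Node:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None

def insert(root, value):
    # Recursive counterpart of the iterative insert above.
    if root is None:
        return Node(value)
    if value < root.value:
        root.left = insert(root.left, value)
    else:
        root.right = insert(root.right, value)
    return root

def search(root, value):
    if root is None or root.value == value:
        return root
    return search(root.left if value < root.value else root.right, value)

root = None
for v in (8, 3, 6, 1, 10):
    root = insert(root, v)
assert search(root, 6) is not None and search(root, -1) is None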
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 | 0 |
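A minimal sketch of the lazy-import idea behind _LazyModule, using module-level __getattr__ (PEP 562). It assumes placement in a package __init__.py; the submodule and attribute names (modeling, MyModel, ...) are placeholders, not the library's structure.

import importlib

_import_structure = {"modeling": ["MyModel"], "configuration": ["MyConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Defer the heavy submodule import until an attribute is first touched.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module("." + module_name, __name__)
    return getattr(module, name)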
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( a_ ,unittest.TestCase ):
_UpperCAmelCase : str = OpenAIGPTTokenizer
_UpperCAmelCase : int = OpenAIGPTTokenizerFast
_UpperCAmelCase : str = True
_UpperCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self : Tuple ) ->Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowerCamelCase__ : str = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
lowerCamelCase__ : int = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_snake_case ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_snake_case ) )
def __lowerCamelCase ( self : List[Any] , A : Optional[Any] ) ->Tuple:
return "lower newer", "lower newer"
def __lowerCamelCase ( self : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase__ : Optional[int] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase__ : Union[str, Any] = '''lower'''
lowerCamelCase__ : List[str] = ['''low''', '''er</w>''']
lowerCamelCase__ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCamelCase__ : str = tokens + ['''<unk>''']
lowerCamelCase__ : Any = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def __lowerCamelCase ( self : Tuple , A : Union[str, Any]=1_5 ) ->Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
# Simple input
lowerCamelCase__ : Tuple = '''This is a simple input'''
lowerCamelCase__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCamelCase__ : Dict = ('''This is a simple input''', '''This is a pair''')
lowerCamelCase__ : Tuple = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
def __lowerCamelCase ( self : List[Any] ) ->Any:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( a_ ):
pass
| 315 |
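A toy illustration of how the merge table built in setUp drives tokenization. This simplified version applies each merge rule once in table order, which suffices for this vocabulary; the real tokenizer repeatedly merges the highest-priority adjacent pair until none applies.

def bpe_tokenize(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # characters plus end-of-word marker
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]  # join the matched pair in place
            else:
                i += 1
    return symbols

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
assert bpe_tokenize("lower", merges) == ["low", "er</w>"]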
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Dict = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : Tuple = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
def _lowercase ( self , _snake_case , _snake_case=None ) -> Optional[int]:
_UpperCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Optional[Any] = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 683 | 0 |
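A minimal sketch of the post-processing implemented above: [CLS] A [SEP] (B [SEP]) with token_type_ids marking the second segment. The id values here are illustrative BERT-style defaults, not read from a vocab file.

def build_pair_inputs(ids_a, ids_b=None, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id]
    token_type_ids = [0] * len(input_ids)          # first segment is all 0s
    if ids_b is not None:
        input_ids += ids_b + [sep_id]
        token_type_ids += [1] * (len(ids_b) + 1)   # second segment (incl. its SEP) is 1s
    return input_ids, token_type_ids

ids, types = build_pair_inputs([7, 8], [9])
assert ids == [101, 7, 8, 102, 9, 102]
assert types == [0, 0, 0, 0, 1, 1]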
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_lowerCAmelCase: Any = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_lowerCAmelCase: int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 |
'''simple docstring'''
def odd_even_sort( input_list ) -> list:
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("""The sorted list is""")
    print(sorted_list)
| 683 | 0 |
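A quick property check for odd_even_sort, assuming the fixed function above is in scope: the result must agree with the built-in sorted() on random inputs.

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert odd_even_sort(list(data)) == sorted(data)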
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
a__ : Union[str, Any] =(720, 1_280) # Height, Width
a__ : str =(0.4, 0.6) # if height or width is lower than this scale, drop it.
a__ : Optional[Any] =1 / 100
a__ : Optional[Any] =""""""
a__ : int =""""""
a__ : Union[str, Any] =""""""
a__ : List[Any] =250
def lowercase__ ( ) -> None:
"""simple docstring"""
__UpperCamelCase = get_dataset(__lowercase , __lowercase )
for index in range(__lowercase ):
__UpperCamelCase = random.sample(range(len(__lowercase ) ) , 4 )
__UpperCamelCase = update_image_and_anno(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , filter_scale=__lowercase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCamelCase = random_chars(32 )
__UpperCamelCase = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
__UpperCamelCase = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __lowercase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
__UpperCamelCase = []
for anno in new_annos:
__UpperCamelCase = anno[3] - anno[1]
__UpperCamelCase = anno[4] - anno[2]
__UpperCamelCase = anno[1] + width / 2
__UpperCamelCase = anno[2] + height / 2
__UpperCamelCase = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__lowercase )
with open(F'''{file_root}.txt''' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowercase__ ( __lowercase : Optional[Any] , __lowercase : List[Any] ) -> tuple[list, list]:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = []
for label_file in glob.glob(os.path.join(__lowercase , '*.txt' ) ):
__UpperCamelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__lowercase ) as in_file:
__UpperCamelCase = in_file.readlines()
__UpperCamelCase = os.path.join(__lowercase , F'''{label_name}.jpg''' )
__UpperCamelCase = []
for obj_list in obj_lists:
__UpperCamelCase = obj_list.rstrip('\n' ).split(' ' )
__UpperCamelCase = float(obj[1] ) - float(obj[3] ) / 2
__UpperCamelCase = float(obj[2] ) - float(obj[4] ) / 2
__UpperCamelCase = float(obj[1] ) + float(obj[3] ) / 2
__UpperCamelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowercase )
labels.append(__lowercase )
return img_paths, labels
def lowercase__ ( __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Dict , __lowercase : str , __lowercase : List[Any] = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
__UpperCamelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase = int(scale_x * output_size[1] )
__UpperCamelCase = int(scale_y * output_size[0] )
__UpperCamelCase = []
__UpperCamelCase = []
for i, index in enumerate(__lowercase ):
__UpperCamelCase = all_img_list[index]
path_list.append(__lowercase )
__UpperCamelCase = all_annos[index]
__UpperCamelCase = cva.imread(__lowercase )
if i == 0: # top-left
__UpperCamelCase = cva.resize(__lowercase , (divid_point_x, divid_point_y) )
__UpperCamelCase = img
for bbox in img_annos:
__UpperCamelCase = bbox[1] * scale_x
__UpperCamelCase = bbox[2] * scale_y
__UpperCamelCase = bbox[3] * scale_x
__UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCamelCase = cva.resize(__lowercase , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCamelCase = img
for bbox in img_annos:
__UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase = bbox[2] * scale_y
__UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCamelCase = cva.resize(__lowercase , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase = img
for bbox in img_annos:
__UpperCamelCase = bbox[1] * scale_x
__UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase = bbox[3] * scale_x
__UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCamelCase = cva.resize(
__lowercase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase = img
for bbox in img_annos:
__UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__UpperCamelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowercase__ ( __lowercase : Any ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCamelCase = ascii_lowercase + digits
return "".join(random.choice(__lowercase ) for _ in range(__lowercase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 399 |
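A minimal sketch of the write-out arithmetic above, converting corner coordinates back to YOLO's (class, x_center, y_center, width, height) relative format:

def corners_to_yolo(label, xmin, ymin, xmax, ymax):
    # All coordinates are relative (0..1); centers are midpoints of the box.
    width, height = xmax - xmin, ymax - ymin
    return (label, xmin + width / 2, ymin + height / 2, width, height)

assert corners_to_yolo(0, 0.25, 0.25, 0.75, 0.75) == (0, 0.5, 0.5, 0.5, 0.5)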
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = checkpoint
_UpperCamelCase : int = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
# Only support V1
_UpperCamelCase : Tuple = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 683 | 0 |
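The conversion above is essentially systematic key renaming. A hedged sketch of two of those renames expressed as regex rules; this is a partial illustration only (it ignores, for instance, the up-block index reversal the real script applies):

import re

def rename_vae_key(key):
    # e.g. "encoder.down.0.block.1..." -> "encoder.down_blocks.0.resnets.1..."
    key = re.sub(r"down\.(\d+)\.block", r"down_blocks.\1.resnets", key)
    key = re.sub(r"up\.(\d+)\.block", r"up_blocks.\1.resnets", key)
    return key

assert rename_vae_key("encoder.down.0.block.1.conv1.weight") == "encoder.down_blocks.0.resnets.1.conv1.weight"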
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCAmelCase_( a_ ):
'''simple docstring'''
__lowercase : Any = 'openai-gpt'
__lowercase : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,__UpperCAmelCase=4_0478 ,__UpperCAmelCase=512 ,__UpperCAmelCase=768 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase="cls_index" ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=0.1 ,**__UpperCAmelCase ,) -> Union[str, Any]:
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Union[str, Any] = n_positions
lowerCAmelCase__ : List[Any] = n_embd
lowerCAmelCase__ : Any = n_layer
lowerCAmelCase__ : str = n_head
lowerCAmelCase__ : List[str] = afn
lowerCAmelCase__ : Dict = resid_pdrop
lowerCAmelCase__ : Dict = embd_pdrop
lowerCAmelCase__ : Dict = attn_pdrop
lowerCAmelCase__ : Optional[Any] = layer_norm_epsilon
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : str = summary_type
lowerCAmelCase__ : Optional[int] = summary_use_proj
lowerCAmelCase__ : List[str] = summary_activation
lowerCAmelCase__ : Dict = summary_first_dropout
lowerCAmelCase__ : int = summary_proj_to_labels
super().__init__(**_snake_case )
| 565 |
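The attribute_map above aliases canonical config names (hidden_size) onto GPT-style fields (n_embd). A minimal sketch of that lookup behaviour; TinyConfig is a hypothetical stand-in, not the transformers implementation:

class TinyConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=768):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Only called when normal lookup fails, so aliases resolve here.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert TinyConfig(n_embd=768).hidden_size == 768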
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 683 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(1_00, 0.25) = }")
print(F"{price_plus_tax(1_25.50, 0.05) = }")
| 644 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.dummy_uncond_unet
lowerCAmelCase_ = PNDMScheduler()
lowerCAmelCase_ = PNDMPipeline(unet=_snake_case , scheduler=_snake_case )
pndm.to(_snake_case )
pndm.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pndm(generator=_snake_case , num_inference_steps=20 , output_type='''numpy''' ).images
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pndm(generator=_snake_case , num_inference_steps=20 , output_type='''numpy''' , return_dict=_snake_case )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''google/ddpm-cifar10-32'''
lowerCAmelCase_ = UNetaDModel.from_pretrained(_snake_case )
lowerCAmelCase_ = PNDMScheduler()
lowerCAmelCase_ = PNDMPipeline(unet=_snake_case , scheduler=_snake_case )
pndm.to(_snake_case )
pndm.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pndm(generator=_snake_case , output_type='''numpy''' ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 274 |
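A hedged usage sketch of the pattern these tests exercise: build a tiny UNet, wrap it in a PNDMPipeline, and sample with a fixed seed. It assumes `diffusers` and `torch` are installed; `UNet2DModel` is the standard diffusers class name, and the tiny configuration mirrors the one in the test.

import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel(
    block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
    in_channels=3, out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipeline = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pipeline(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3)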
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=128 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            # mask the pad tokens so the loss ignores them
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        train_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        val_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='''steps''' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 683 | 0 |
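A minimal, self-contained sketch of the label-masking step the test above relies on: pad-token ids in the labels are replaced with -100 so the cross-entropy loss ignores padding. The token ids below are illustrative.

pad_token_id = 0  # assumed pad id for illustration
batch_labels = [[101, 7592, 102, 0, 0], [101, 2088, 999, 102, 0]]
masked = [[-100 if tok == pad_token_id else tok for tok in labels] for labels in batch_labels]
print(masked)  # [[101, 7592, 102, -100, -100], [101, 2088, 999, 102, -100]]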
'''simple docstring'''
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('''env''')
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''')
    parser.add_argument(
        '''--config_file''', default=None, help='''The config file to use for the default values in the launching script.''')
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})',
        '''PyTorch XPU available''': str(pt_xpu_available),
        '''PyTorch NPU available''': str(pt_npu_available),
        '''System RAM''': f'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''')
    print('''\n'''.join([f'- {prop}: {val}' for prop, val in info.items()]))
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''')
    accelerate_config_str = (
        '''\n'''.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f'\t{accelerate_config}'
    )
    print(accelerate_config_str)
    info['''`Accelerate` configs'''] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 212 |
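The parser above follows the standard argparse subcommand-dispatch pattern. Here is a self-contained sketch of that pattern with placeholder names, independent of accelerate.

import argparse

def env_command(args):
    print(f"config file: {args.config_file}")

parser = argparse.ArgumentParser("tool")
subparsers = parser.add_subparsers()
env_parser = subparsers.add_parser("env")
env_parser.add_argument("--config_file", default=None)
env_parser.set_defaults(func=env_command)

args = parser.parse_args(["env", "--config_file", "cfg.yaml"])
args.func(args)  # prints: config file: cfg.yaml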
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ) -> Optional[int]:
    if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )
    parser.add_argument(
        '''--config_file''' ,default=None ,help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ) -> Any:
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
        '''PyTorch XPU available''': str(pt_xpu_available ),
        '''PyTorch NPU available''': str(pt_npu_available ),
        '''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
    print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    accelerate_config_str = (
        '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config ,dict )
        else f'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info['''`Accelerate` configs'''] = accelerate_config
    return info
def main( ) -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
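A small, standalone sketch of the report formatting this command performs: a dict of properties rendered as "- key: value" lines, with RAM converted from bytes to gibibytes. The byte count below is an assumed value for illustration.

import platform

total_bytes = 17_179_869_184  # assumed RAM size (16 GiB)
info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
    "System RAM": f"{total_bytes / 1024 ** 3:.2f} GB",
}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))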
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( _A: Optional[Any] ) -> Optional[Any]:
    '''simple docstring'''
    if isinstance(_A , collections.abc.Iterable ):
        return _A
    return (_A, _A)
@require_flax
class a__:
'''simple docstring'''
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
pass
    def a_ ( self , a , b , tol):
        """simple docstring"""
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f"Difference between torch and flax is {diff} (>= {tol}).")
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case)
lowerCAmelCase = FlaxVisionTextDualEncoderModel(_snake_case)
lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case)
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case)
lowerCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case)
lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case)
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case)
lowerCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case)
lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case)
lowerCAmelCase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case)
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(_snake_case)
lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case)
lowerCAmelCase = after_output[0]
lowerCAmelCase = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_snake_case , 1E-3)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case)
lowerCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case)
lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case)
lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = to_atuple(vision_model.config.image_size)
lowerCAmelCase = to_atuple(vision_model.config.patch_size)
lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
pt_model.to(_snake_case)
pt_model.eval()
# prepare inputs
lowerCAmelCase = inputs_dict
lowerCAmelCase = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCAmelCase = pt_model(**_snake_case).to_tuple()
lowerCAmelCase = fx_model(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_snake_case , pt_output.numpy() , 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_snake_case)
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(_snake_case , from_pt=_snake_case)
lowerCAmelCase = fx_model_loaded(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case) , """Output lengths differ between Flax and PyTorch""")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_snake_case , pt_output.numpy() , 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_snake_case)
lowerCAmelCase = VisionTextDualEncoderModel.from_pretrained(_snake_case , from_flax=_snake_case)
pt_model_loaded.to(_snake_case)
pt_model_loaded.eval()
with torch.no_grad():
lowerCAmelCase = pt_model_loaded(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_snake_case , pt_output_loaded.numpy() , 4E-2)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case)
lowerCAmelCase = VisionTextDualEncoderModel(_snake_case)
lowerCAmelCase = FlaxVisionTextDualEncoderModel(_snake_case)
lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _snake_case)
lowerCAmelCase = fx_state
self.check_pt_flax_equivalence(_snake_case , _snake_case , _snake_case)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case)
lowerCAmelCase = VisionTextDualEncoderModel(_snake_case)
lowerCAmelCase = FlaxVisionTextDualEncoderModel(_snake_case)
lowerCAmelCase = load_flax_weights_in_pytorch_model(_snake_case , fx_model.params)
self.check_pt_flax_equivalence(_snake_case , _snake_case , _snake_case)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case)
@is_pt_flax_cross_test
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = config_inputs_dict.pop("""vision_config""")
lowerCAmelCase = config_inputs_dict.pop("""text_config""")
lowerCAmelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(_snake_case , _snake_case , _snake_case)
self.check_equivalence_flax_to_pt(_snake_case , _snake_case , _snake_case)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_pretrained_model_and_inputs()
lowerCAmelCase = model_a(**_snake_case)
lowerCAmelCase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_snake_case)
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(_snake_case)
lowerCAmelCase = model_a(**_snake_case)
lowerCAmelCase = after_outputs[0]
lowerCAmelCase = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_snake_case , 1E-5)
@require_flax
class a__( a_ , unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_snake_case , text_from_pt=_snake_case , )
lowerCAmelCase = 13
lowerCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
lowerCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
lowerCAmelCase = random_attention_mask([batch_size, 4])
lowerCAmelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = FlaxViTModel(_snake_case)
lowerCAmelCase = FlaxBertModel(_snake_case)
return vision_model, text_model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxViTModelTester(self)
lowerCAmelCase = FlaxBertModelTester(self)
lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase = vision_config_and_inputs
lowerCAmelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class a__( a_ , unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_snake_case , text_from_pt=_snake_case , )
lowerCAmelCase = 13
lowerCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
lowerCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
lowerCAmelCase = random_attention_mask([batch_size, 4])
lowerCAmelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModel(_snake_case)
lowerCAmelCase = FlaxBertModel(_snake_case)
return vision_model, text_model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModelTester(self)
lowerCAmelCase = FlaxBertModelTester(self)
lowerCAmelCase = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase = vision_config_and_inputs
lowerCAmelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0)
lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""")
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
lowerCAmelCase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_snake_case , padding=_snake_case , return_tensors="""np""")
lowerCAmelCase = model(**_snake_case)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase = np.array([[1.2284727, 0.3104122]])
self.assertTrue(np.allclose(outputs.logits_per_image , _snake_case , atol=1E-3))
| 370 |
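A generic numpy sketch of the save/load round-trip check these tests repeat: run the model, serialize it, reload, run again, and bound the drift by a tolerance. The helper name is illustrative.

import numpy as np

def roundtrip_close(out_before, out_after, tol=1e-3):
    # max absolute drift between outputs before and after save/load
    return np.amax(np.abs(out_before - out_after)) <= tol

print(roundtrip_close(np.ones(4), np.ones(4) + 1e-6))  # True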
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def get_config( model_name ) -> Tuple:
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer ,num_labels=10_00 ,id2label=idalabel ,label2id=labelaid ,)
    return config
def rename_key( name ) -> str:
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' ,'''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' ,'''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img( ) -> Optional[int]:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name ,pytorch_dump_folder_path ,push_to_hub=False ) -> List[Any]:
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name ,pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} ,model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True ,
        size={'''shortest_edge''': timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=True ,
        crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,
        do_normalize=True ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() ,)
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image ,return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values ,pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''' ,logits[0, :3] )
    print('''Predicted class:''' ,model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits ,outputs.logits ,atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 0 |
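A small sketch of the head-weight squeeze in the conversion above: timm stores the BiT classifier as a 1x1 convolution, so its (out, in, 1, 1) weight is squeezed to (out, in) before loading into a linear head. The shapes below are illustrative.

import torch

conv_head_weight = torch.randn(1000, 2048, 1, 1)  # assumed classifier shape
linear_head_weight = conv_head_weight.squeeze()
print(linear_head_weight.shape)  # torch.Size([1000, 2048])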
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ : Any = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ : List[str] = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class SCREAMING_SNAKE_CASE__ ( a_ ):
snake_case__ : Dict = VOCAB_FILES_NAMES
snake_case__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Any = ['input_ids', 'attention_mask']
snake_case__ : Dict = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> Optional[int]:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ) -> Any:
        # the mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> Optional[Any]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 570 |
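A standalone sketch of the special-token layout built by `build_inputs_with_special_tokens` above, with assumed ids bos=0 and eos=2 (the usual BART vocabulary positions).

bos_token_id, eos_token_id = 0, 2  # assumed BART ids for <s> and </s>

def build_pair(ids_0, ids_1=None):
    output = [bos_token_id] + ids_0 + [eos_token_id]
    if ids_1 is None:
        return output
    return output + [eos_token_id] + ids_1 + [eos_token_id]

print(build_pair([5, 6], [7]))  # [0, 5, 6, 2, 2, 7, 2]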
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
# Only two chains exist: one ends at 89 (seeding its member 58 first means
# the fewest iterations are needed to classify all other members), and the
# other ends at 1 and contains only the number 1 itself.
# So 58 and 1 are marked before the search starts.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_00_00_00 ) -> int:
    for i in range(1 ,number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
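A compact, memoization-free cross-check of the chain logic above: every chain of digit-square sums provably ends at 1 or at 89.

def ends_at_89(n: int) -> bool:
    while n != 1 and n != 89:
        n = sum(int(d) ** 2 for d in str(n))
    return n == 89

print(ends_at_89(44))  # False: 44 -> 32 -> 13 -> 10 -> 1
print(ends_at_89(85))  # True:  85 -> 89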
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE__ : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key( state_dict , old , new ) -> List[Any]:
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> Dict:
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ) -> Any:
    '''simple docstring'''
    prefix = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:2_56, :]
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:2_56]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[2_56:5_12, :]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[2_56:5_12]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-2_56:, :]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-2_56:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:2_56, :]
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:2_56]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[2_56:5_12, :]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[2_56:5_12]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-2_56:, :]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-2_56:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:2_56, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:2_56]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[2_56:5_12, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[2_56:5_12]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-2_56:, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-2_56:]
def resize( image , checkpoint_url ) -> Dict:
    '''simple docstring'''
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 8_00 if '''detection''' in checkpoint_url else 10_00
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ) -> Optional[Any]:
    '''simple docstring'''
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
logger.info('Converting model...' )
# load original state dict
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = rename_backbone_keys(SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE_ = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
SCREAMING_SNAKE_CASE_ = state_dict.pop(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = val
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_ = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = {0: '''table''', 1: '''table rotated'''}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE_ = 1_25
SCREAMING_SNAKE_CASE_ = 6
SCREAMING_SNAKE_CASE_ = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = DetrImageProcessor(
format='coco_detection' , max_size=8_00 if 'detection' in checkpoint_url else 10_00 )
SCREAMING_SNAKE_CASE_ = TableTransformerForObjectDetection(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# verify our conversion
SCREAMING_SNAKE_CASE_ = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
SCREAMING_SNAKE_CASE_ = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = Image.open(SCREAMING_SNAKE_CASE ).convert('RGB' )
SCREAMING_SNAKE_CASE_ = normalize(resize(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = (1, 15, 3)
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
SCREAMING_SNAKE_CASE_ = (1, 1_25, 7)
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
SCREAMING_SNAKE_CASE_ = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 205 |
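A minimal sketch of the fused-projection split `read_in_q_k_v` performs above: PyTorch's MultiheadAttention stores the query, key, and value projections stacked in a single `in_proj` matrix, which is sliced into thirds.

import torch

hidden = 256  # matches the slice bounds used above
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
print(q_w.shape, k_w.shape, v_w.shape)  # three (256, 256) matrices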
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( year ,month ,day ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
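An independent cross-check of the doomsday results above using only the standard library.

import datetime

print(datetime.date(2000, 1, 1).strftime("%A"))  # Saturday
print(datetime.date(2023, 7, 4).strftime("%A"))  # Tuesday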
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase : Optional[Any] ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCAmelCase : Any =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 113 |
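A minimal sketch of the lazy-import idea behind `_LazyModule`, using PEP 562 module-level `__getattr__`; the module and attribute names below are illustrative, not the transformers implementation.

import importlib

_import_structure = {"json": ["dumps"]}  # illustrative mapping

def __getattr__(name):
    # resolve the attribute lazily on first access (PEP 562)
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(__getattr__("dumps")({"ok": True}))  # {"ok": true}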
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
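# To run just this module from a transformers checkout (a sketch; the path is
# assumed from the usual repo layout):
#   python -m pytest tests/pipelines/test_pipelines_visual_question_answering.py -k "small_model_pt"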
| 683 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """simple docstring"""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
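# Sanity check: the Project Euler 145 statement says there are exactly 120
# reversible numbers below one thousand, i.e.
#   >>> solution(3)
#   120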
| 315 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
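# A minimal offline usage sketch of the classes above (no checkpoint download;
# the task kwarg comes from the OnnxConfig base class):
#
#   config = BertConfig(vocab_size=30522, hidden_size=768)
#   onnx_config = BertOnnxConfig(config, task="multiple-choice")
#   print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']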
| 20 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    # Returns the relative position bias parameters of a layer. Does not transpose.
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    # Returns the k/o/q/v parameters of (self-)attention. Does not transpose.
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    # Returns the MLP parameters of a layer. Does not transpose.
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    # Returns the layer norm scale of a layer.
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    # Convert the numpy arrays into torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
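# Example invocation (a sketch; the script name and all paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention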
| 683 | 0 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'{solution() = }')
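# Sanity check: Project Euler 44 asks for the minimised difference D between a
# pair of pentagonal numbers whose sum and difference are pentagonal; the
# published answer, which this search returns, is 5482660:
#   >>> solution()
#   5482660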
| 399 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
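# A minimal usage sketch (downloads the vocab/merges files for a checkpoint
# named in the maps above; the ids shown are the standard RoBERTa-style BPE
# encoding of "Hello world"):
#
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok("Hello world")["input_ids"]  # [0, 31414, 232, 2]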
| 565 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # support_list is the module-level result of the make_support call in the UI flow below
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
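# To launch the demo (a sketch; assumes this script is saved as eli5_app.py
# next to eli5_utils.py, with the faiss index files and seq2seq checkpoints in
# place, plus a CUDA device and a local Elasticsearch instance available):
#   streamlit run eli5_app.py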
| 683 | 0 |