| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""Tests for loading and pushing image processors to the Hugging Face Hub."""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402

SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
code_codestyle: 640
"""Project Euler problem 25: index of the first Fibonacci term to contain n digits."""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """A generator that produces numbers in the Fibonacci sequence."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
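# Sanity check (editor's addition, not in the original script): the Project
# Euler 25 statement notes that the 12th term, 144, is the first Fibonacci
# number with three digits, so:
assert solution(3) == 12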
style_context_codestyle: 640 | label: 1
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
code_codestyle: 429
import math


def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 429 | label: 1
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
print("""Loading config file...""" )
def flatten_yaml_as_dict(__lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase="." ):
lowercase_ = []
for k, v in d.items():
lowercase_ = parent_key + sep + k if parent_key else k
if isinstance(__lowerCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__lowerCAmelCase , __lowerCAmelCase , sep=__lowerCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(__lowerCAmelCase )
lowercase_ = argparse.Namespace()
with open(__lowerCAmelCase , """r""" ) as yaml_file:
try:
lowercase_ = yaml.load(__lowerCAmelCase , Loader=yaml.FullLoader )
lowercase_ = flatten_yaml_as_dict(__lowerCAmelCase )
for k, v in flat_cfg.items():
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(__lowerCAmelCase , str(__lowerCAmelCase ) ) )
return config
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = MobileViTVaConfig()
lowercase_ = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
lowercase_ = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
lowercase_ = 3_84
else:
lowercase_ = 2_56
lowercase_ = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
lowercase_ = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
lowercase_ = 3_84
else:
lowercase_ = 2_56
lowercase_ = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
lowercase_ = 1_51
lowercase_ = 5_12
lowercase_ = """ade20k-id2label.json"""
lowercase_ = True
elif task_name.startswith("""voc_""" ):
lowercase_ = 21
lowercase_ = 5_12
lowercase_ = """pascal-voc-id2label.json"""
lowercase_ = True
# orig_config
lowercase_ = load_orig_config_file(__lowerCAmelCase )
assert getattr(__lowerCAmelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
lowercase_ = getattr(__lowerCAmelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(__lowerCAmelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowercase_ = getattr(__lowerCAmelCase , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowercase_ = getattr(__lowerCAmelCase , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
lowercase_ = getattr(__lowerCAmelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
lowercase_ = getattr(__lowerCAmelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
lowercase_ = getattr(__lowerCAmelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
lowercase_ = """huggingface/label-files"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowercase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
return config
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = dct.pop(__lowerCAmelCase )
lowercase_ = val
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
'''simple docstring'''
if base_model:
lowercase_ = """"""
else:
lowercase_ = """mobilevitv2."""
lowercase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowercase_ = k[8:]
else:
lowercase_ = k
if ".block." in k:
lowercase_ = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
lowercase_ = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
lowercase_ = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
lowercase_ = k_new.replace("""conv_1.""" , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
lowercase_ = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowercase_ = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
lowercase_ = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
lowercase_ = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
lowercase_ = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
lowercase_ = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowercase_ = [0, 1]
elif i == 4:
lowercase_ = [0, 1, 2, 3]
elif i == 5:
lowercase_ = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
lowercase_ = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowercase_ = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
lowercase_ = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowercase_ = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
lowercase_ = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
lowercase_ = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
lowercase_ = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
lowercase_ = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
lowercase_ = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
lowercase_ = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
lowercase_ = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
lowercase_ = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(__lowerCAmelCase )
for k in keys_to_ignore:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE () -> Optional[int]:
'''simple docstring'''
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = get_mobilevitva_config(__lowerCAmelCase , __lowerCAmelCase )
# load original state_dict
lowercase_ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowercase_ = MobileViTVaForSemanticSegmentation(__lowerCAmelCase ).eval()
lowercase_ = False
else:
lowercase_ = MobileViTVaForImageClassification(__lowerCAmelCase ).eval()
lowercase_ = False
# remove and rename some keys of load the original model
lowercase_ = checkpoint
remove_unused_keys(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load modified state_dict
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase_ = model(**__lowerCAmelCase )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowercase_ = outputs.logits
lowercase_ = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowercase_ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-4 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
code_codestyle: 567
"""simple docstring"""
from math import factorial
UpperCAmelCase : Tuple = {str(d): factorial(d) for d in range(10)}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(__lowerCAmelCase ) )
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __lowerCAmelCase ) if sum_of_digit_factorial(__lowerCAmelCase ) == i )
if __name__ == "__main__":
print(F"{solution() = }")
style_context_codestyle: 567 | label: 1
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel state dict as an original TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
code_codestyle: 334
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to target
    (Project Euler problem 85): an a x b grid contains T(a) * T(b) rectangles,
    where T(n) is the n-th triangle number."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 334 | label: 1
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if the given string is a valid Indian mobile phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
code_codestyle: 597
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
style_context_codestyle: 597 | label: 1
from manim import *

# NOTE: the original color constants for the input square, arrow, and
# Circumscribe highlights were obfuscated in this sample; GREEN/ORANGE below
# are plausible reconstructions, not confirmed. BLUE (checkpoint) and YELLOW
# (empty model) are taken from the surviving MarkupText key labels.


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])
        self.play(Write(step_6))

        input = Square(0.3)
        input.set_fill(GREEN, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_6))

        a = Arrow(start=UP, end=DOWN, color=GREEN, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_7),
            FadeOut(a, run_time=0.5),
        )

        step_8 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])

        self.play(Write(step_8, run_time=3), MoveToTarget(input))

        self.wait()
code_codestyle: 657
from sklearn.metrics import f1_score

import datasets

_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> f1_metric = datasets.load_metric("f1")
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
style_context_codestyle: 657 | label: 1
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral
    (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
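# Why "4 * odd**2 - 6 * even" (editor's note): ring i of the spiral has side
# s = 2*i + 1, and its top-right corner holds s**2; the other three corners are
# each (s - 1) = 2*i less than the previous one, so the four corners sum to
#   s**2 + (s**2 - 2*i) + (s**2 - 4*i) + (s**2 - 6*i) = 4*s**2 - 12*i
#                                                     = 4*odd**2 - 6*even.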
code_codestyle: 86
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever

logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """A distributed retriever built on top of ``torch.distributed``: only the
    main worker loads the index into (CPU) memory; the other workers send it
    their queries and receive the retrieved results via gather/scatter."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
style_context_codestyle: 86 | label: 1
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
code_codestyle: 716
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
style_context_codestyle: 235 | label: 0
"""Find the next greatest element to the right of each element in an array."""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the next greatest element for each element, in O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow, but iterating with enumerate and slices.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the next greatest element for each element with a stack, in O(n).

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
code_codestyle: 18
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_snake_case = get_logger(__name__)
_snake_case = Path(__file__).parent / 'model_card_template.md'
_snake_case = uuida().hex
_snake_case = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_snake_case = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_snake_case = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _A ( snake_case = None ) -> str:
_lowercase : Dict = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(snake_case , snake_case ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(snake_case , snake_case ):
ua += "; " + user_agent
return ua
def _A ( snake_case , snake_case = None , snake_case = None ) -> Optional[Any]:
if token is None:
_lowercase : List[Any] = HfFolder.get_token()
if organization is None:
_lowercase : Tuple = whoami(snake_case )["name"]
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def _A ( snake_case , snake_case ) -> Tuple:
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]:
return
_lowercase : Tuple = args.hub_token if hasattr(snake_case , "hub_token" ) else None
_lowercase : Optional[int] = get_full_repo_name(snake_case , token=snake_case )
_lowercase : List[Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None
) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
_lowercase : List[str] = os.path.join(args.output_dir , "README.md" )
model_card.save(snake_case )
def _A ( snake_case , snake_case = None ) -> Union[str, Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
_lowercase : Optional[int] = str(Path(snake_case ).as_posix() )
_lowercase : Dict = re.search(r"snapshots/([^/]+)/" , snake_case )
if search is None:
return None
_lowercase : Union[str, Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of"
                    " diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\n"
                "Please file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste "
                "this whole message and we will do our best to help."
            )
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant(weights_name, variant=None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
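# Example of the variant insertion, using the standard diffusers weights
# file name for illustration:
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   # -> "diffusion_pytorch_model.fp16.bin"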
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`"
                    f" is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model"
                    f" variants via `revision='{revision}'` will be removed in diffusers v1. Please use"
                    f" `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via"
                    f" `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One"
                    f" should use `variant='{revision}'` instead. However, it appears that"
                    f" {pretrained_model_name_or_path} currently does not have a"
                    f" {_add_variant(weights_name, revision)} file in the 'main' branch of"
                    f" {pretrained_model_name_or_path}.\nThe Diffusers team and community would be very grateful if"
                    " you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title"
                    f" '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that"
                    " the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
                " token having permission to this repo with `use_auth_token` or log in with `huggingface-cli"
                " login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
                " this model name. Check the model page at"
                f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from"
                " 'https://huggingface.co/models', make sure you don't have a local directory with the same name."
                f" Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
                f" containing a file named {weights_name}"
            )
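# A hedged usage sketch (keyword values below are placeholders, not
# defaults; the parameters are keyword-only and have no fallbacks):
#
#   model_file = _get_model_file(
#       "runwayml/stable-diffusion-v1-5",
#       weights_name="diffusion_pytorch_model.bin",
#       subfolder="unet", cache_dir=None, force_download=False, proxies=None,
#       resume_download=False, local_files_only=False, use_auth_token=None,
#       user_agent=None, revision=None,
#   )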
| 245
| 0
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
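# Example: the type is inferred from substrings of the checkpoint name.
#
#   infer_model_type("facebook/rag-token-nq")     # -> "rag_token"
#   infer_model_type("facebook/rag-sequence-nq")  # -> "rag_sequence"
#   infer_model_type("facebook/bart-large")       # -> "bart"
#   infer_model_type("t5-small")                  # -> None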
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
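# Example with the exact-match metric imported above (assuming the
# SQuAD-style answer normalization implemented in utils_rag): the best score
# over all gold answers is kept, so one matching reference suffices.
#
#   metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Lyon"])  # -> True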
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
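# Worked example for k = 2: a hypothesis line "A\tB" scored against a
# reference line "B\tC" shares one provenance title out of k, contributing
# 0.5; per-line scores are averaged and reported as a percentage.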
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", action="store_true",
        help="Recalculate predictions even if the prediction file exists",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
lowerCAmelCase_ : Any = get_args()
main(args)
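# Typical invocation, mirroring the RAG research-project README (paths are
# placeholders and the script file name is an assumption):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5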
| 289
|
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( __lowerCAmelCase ):
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = Rectangle(height=0.5 , width=0.5 )
a__ = Rectangle(height=0.25 , width=0.25 )
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("CPU" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("GPU" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Model" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase )
a__ = []
a__ = []
a__ = []
for i, rect in enumerate(lowerCamelCase ):
rect.set_stroke(lowerCamelCase )
a__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase , buff=0.0 )
self.add(lowerCamelCase )
model_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase , *lowerCamelCase )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Loaded Checkpoint" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase )
a__ = []
a__ = []
for i, rect in enumerate(lowerCamelCase ):
a__ = fill.copy().set_fill(lowerCamelCase , opacity=0.7 )
target.move_to(lowerCamelCase )
ckpt_arr.append(lowerCamelCase )
a__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase , lowerCamelCase )
a__ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase )
a__ = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Disk" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) , Write(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) )
a__ = []
for i, rect in enumerate(lowerCamelCase ):
a__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase , run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(FadeOut(lowerCamelCase ) )
a__ = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) )
self.play(
FadeOut(lowerCamelCase , lowerCamelCase , *lowerCamelCase , *lowerCamelCase ) , )
self.wait()
| 289
| 1
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
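# For the array-backed heap below, the children of position 1 live at
# positions 3 and 4, and both point back to 1 as their parent:
#
#   get_child_left_position(1)   # -> 3
#   get_child_right_position(1)  # -> 4
#   get_parent_position(3)       # -> 1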
class MinPriorityQueue(Generic[T]):
def __init__( self :int ):
'''simple docstring'''
UpperCamelCase_ : list[tuple[T, int]] =[]
UpperCamelCase_ : dict[T, int] ={}
UpperCamelCase_ : int =0
def __len__( self :Dict ):
'''simple docstring'''
return self.elements
def __repr__( self :str ):
'''simple docstring'''
return str(self.heap )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
return self.elements == 0
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :Tuple , _lowerCamelCase :List[Any] ):
'''simple docstring'''
self.heap.append((elem, weight) )
UpperCamelCase_ : int =self.elements
self.elements += 1
self._bubble_up(_lowerCamelCase )
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCamelCase_ : Optional[Any] =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCamelCase_ : List[Any] =self.heap[0]
self._bubble_down(_lowerCamelCase )
return elem
def lowerCamelCase_ ( self :int , _lowerCamelCase :int , _lowerCamelCase :str ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] =self.position_map[elem]
UpperCamelCase_ : Tuple =(elem, weight)
if position > 0:
UpperCamelCase_ : str =get_parent_position(_lowerCamelCase )
UpperCamelCase_ : Any =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_lowerCamelCase )
else:
self._bubble_down(_lowerCamelCase )
else:
self._bubble_down(_lowerCamelCase )
def lowerCamelCase_ ( self :str , _lowerCamelCase :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] =self.position_map[elem]
if curr_pos == 0:
return None
UpperCamelCase_ : Any =get_parent_position(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =self.heap[curr_pos]
UpperCamelCase_ : str =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_lowerCamelCase , _lowerCamelCase )
return self._bubble_up(_lowerCamelCase )
return None
def lowerCamelCase_ ( self :str , _lowerCamelCase :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self.position_map[elem]
UpperCamelCase_ : Any =self.heap[curr_pos]
UpperCamelCase_ : Tuple =get_child_left_position(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =get_child_right_position(_lowerCamelCase )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCamelCase_ : Dict =self.heap[child_left_position]
UpperCamelCase_ : List[str] =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_lowerCamelCase , _lowerCamelCase )
return self._bubble_down(_lowerCamelCase )
if child_left_position < self.elements:
UpperCamelCase_ : Union[str, Any] =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_lowerCamelCase , _lowerCamelCase )
return self._bubble_down(_lowerCamelCase )
else:
return None
if child_right_position < self.elements:
UpperCamelCase_ : int =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_lowerCamelCase , _lowerCamelCase )
return self._bubble_down(_lowerCamelCase )
return None
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =self.heap[nodea_pos][0]
UpperCamelCase_ : str =self.heap[nodea_pos][0]
UpperCamelCase_ : List[str] =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCamelCase_ : Optional[int] =nodea_pos
UpperCamelCase_ : Tuple =nodea_pos
class GraphUndirectedWeighted(Generic[T]):
def __init__( self :Dict ):
'''simple docstring'''
UpperCamelCase_ : dict[T, dict[T, int]] ={}
UpperCamelCase_ : int =0
def __repr__( self :str ):
'''simple docstring'''
return str(self.connections )
def __len__( self :int ):
'''simple docstring'''
return self.nodes
def lowerCamelCase_ ( self :List[Any] , _lowerCamelCase :Tuple ):
'''simple docstring'''
if node not in self.connections:
UpperCamelCase_ : List[Any] ={}
self.nodes += 1
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Tuple ):
'''simple docstring'''
self.add_node(_lowerCamelCase )
self.add_node(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =weight
UpperCamelCase_ : Any =weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 357
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__UpperCAmelCase : Dict = 'hf-internal-testing/tiny-random-bert'
__UpperCAmelCase : str = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
__UpperCAmelCase : List[str] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def _a ( self ):
"""simple docstring"""
snake_case_ :str = cached_file(a , a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(a , a ) ) )
with open(os.path.join(a , "refs" , "main" ) ) as f:
snake_case_ :str = f.read()
self.assertEqual(a , os.path.join(a , "snapshots" , a , a ) )
self.assertTrue(os.path.isfile(a ) )
# File is cached at the same place the second time.
snake_case_ :Optional[int] = cached_file(a , a )
self.assertEqual(a , a )
# Using a specific revision to test the full commit hash.
snake_case_ :List[str] = cached_file(a , a , revision="9b8c223" )
self.assertEqual(a , os.path.join(a , "snapshots" , a , a ) )
def _a ( self ):
"""simple docstring"""
with self.assertRaisesRegex(a , "is not a valid model identifier" ):
snake_case_ :int = cached_file("tiny-random-bert" , a )
with self.assertRaisesRegex(a , "is not a valid git identifier" ):
snake_case_ :Tuple = cached_file(a , a , revision="aaaa" )
with self.assertRaisesRegex(a , "does not appear to have a file named" ):
snake_case_ :Union[str, Any] = cached_file(a , "conf" )
def _a ( self ):
"""simple docstring"""
with self.assertRaisesRegex(a , "does not appear to have a file named" ):
snake_case_ :Any = cached_file(a , "conf" )
with open(os.path.join(a , "refs" , "main" ) ) as f:
snake_case_ :List[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(a , ".no_exist" , a , "conf" ) ) )
snake_case_ :List[Any] = cached_file(a , "conf" , _raise_exceptions_for_missing_entries=a )
self.assertIsNone(a )
snake_case_ :Optional[int] = cached_file(a , "conf" , local_files_only=a , _raise_exceptions_for_missing_entries=a )
self.assertIsNone(a )
snake_case_ :Any = mock.Mock()
snake_case_ :List[str] = 5_00
snake_case_ :Optional[Any] = {}
snake_case_ :Union[str, Any] = HTTPError
snake_case_ :Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a ) as mock_head:
snake_case_ :Tuple = cached_file(a , "conf" , _raise_exceptions_for_connection_errors=a )
self.assertIsNone(a )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self ):
"""simple docstring"""
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
def _a ( self ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(a , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(a , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , a , revision="ahaha" )
snake_case_ :str = get_file_from_repo("bert-base-cased" , a )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case_ :int = json.loads(open(a , "r" ).read() )
self.assertEqual(config["hidden_size"] , 7_68 )
def _a ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ :List[str] = Path(a ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(a , "a.txt" ) , str(a ) )
self.assertIsNone(get_file_from_repo(a , "b.txt" ) )
| 584
| 0
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionControlNetImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self: Optional[int] ) -> Optional[Any]:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
_A = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
_A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_A = CLIPTextModel(__A )
_A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_A = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
def __A ( self: List[Any] ) -> List[Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self: str ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __A ( self: str ) -> Optional[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionControlNetImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __A ( self: int ) -> str:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
torch.manual_seed(0 )
_A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_A = CLIPTextModel(__A )
_A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
_A = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
def __A ( self: Union[str, Any] ) -> Any:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self: Optional[int] ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __A ( self: Union[str, Any] ) -> int:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def __A ( self: str ) -> Optional[int]:
_A = self.get_dummy_components()
_A = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self: List[str] ) -> List[str]:
_A = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
_A = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = '''evil space-punk bird'''
_A = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) )
_A = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) )
_A = pipe(
__A , __A , control_image=__A , generator=__A , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 62
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
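# Hedged usage sketch ("facebook/rag-token-nq" is a published checkpoint;
# treat the rest as illustrative): __call__ proxies to the current
# tokenizer, which starts out as the question-encoder tokenizer.
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")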
| 62
| 1
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
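# For [3, 1, 2, 4] the state-space tree enumerates all 4! = 24 orderings in
# the index order of the loop above, starting with [3, 1, 2, 4],
# [3, 1, 4, 2], [3, 2, 1, 4], ... and printing each exactly once.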
| 310
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
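# Note that 128 + level + (c - 128) algebraically reduces to c + level; the
# 128 terms are kept for readability. Image.point evaluates the function
# over the 0..255 input range and, for 8-bit bands, clamps results back into
# 0..255, so level=100 maps an input of 200 to 255 rather than 300.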
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 310
| 1
|
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
__snake_case = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 711
|
'''simple docstring'''
import argparse
__snake_case = """docs/source/_static/js/custom.js"""
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Any:
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ = f.readlines()
lowercase_ = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
__snake_case = parser.parse_args()
update_custom_js(args.version)
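# For example, `python update_custom_js.py --version 4.21.0` (script name
# assumed) rewrites the stable-version line to
#   const stableVersion = "v4.21.0"
# and appends `    "v4.21.0": "v4.21.0",` as the last entry of the
# versionMapping dictionary in custom.js.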
| 603
| 0
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
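# Worked example of the carry loop for add(5, 3):
#   carry = 5 & 3 = 1;  first = 5 ^ 3 = 6;  second = 1 << 1 = 2
#   carry = 6 & 2 = 2;  first = 6 ^ 2 = 4;  second = 2 << 1 = 4
#   carry = 4 & 4 = 4;  first = 4 ^ 4 = 0;  second = 4 << 1 = 8
#   carry = 0 & 8 = 0;  first = 0 ^ 8 = 8;  second = 0  -> returns 8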
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
| 46
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Append at the end of the array and restore the heap invariant upward
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Swap the root with the last node, pop it, and bubble the new root down
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Rewrite the stored weight, then restore the invariant in the right direction
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
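# A small usage sketch for the structures above (node labels are arbitrary);
# guarded so importing this module stays side-effect free:
if __name__ == "__main__":
    demo_graph = GraphUndirectedWeighted[str]()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(demo_graph)
    print(dist, parent)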
| 47
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
a = logging.get_logger('transformers.models.speecht5')
a = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
a = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
a = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
a = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
a = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
a = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
a = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
a = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
a = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
a = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a = []
a = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
a = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
a = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
a = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def lowercase (snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : str ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
lowerCAmelCase = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
lowerCAmelCase = getattr(snake_case__ , snake_case__ ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def lowercase (snake_case__ : int , snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
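# For example (illustrative, using the IGNORE_KEYS patterns defined above):
#     should_ignore("encoder.layers.3.norm_k.weight", IGNORE_KEYS)  -> True
# via the ".*." branch (prefix "encoder.layers", suffix "norm_k.weight"), while a
# trailing wildcard such as "speech_decoder_prenet.*" matches any name that
# starts with "speech_decoder_prenet.".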
def lowercase (snake_case__ : str , snake_case__ : Tuple , snake_case__ : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase = []
if task == "s2t":
lowerCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder
lowerCAmelCase = MAPPING_S2T
lowerCAmelCase = IGNORE_KEYS_S2T
elif task == "t2s":
lowerCAmelCase = None
lowerCAmelCase = MAPPING_T2S
lowerCAmelCase = IGNORE_KEYS_T2S
elif task == "s2s":
lowerCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder
lowerCAmelCase = MAPPING_S2S
lowerCAmelCase = IGNORE_KEYS_S2S
else:
raise ValueError(f'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(snake_case__ , snake_case__ ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split(""".*.""" )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(snake_case__ )[0].split(""".""" )[-2]
lowerCAmelCase = mapped_key.replace("""*""" , snake_case__ )
if "weight_g" in name:
lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
lowerCAmelCase = """weight_v"""
elif "bias" in name:
lowerCAmelCase = """bias"""
elif "weight" in name:
lowerCAmelCase = """weight"""
elif "running_mean" in name:
lowerCAmelCase = """running_mean"""
elif "running_var" in name:
lowerCAmelCase = """running_var"""
elif "num_batches_tracked" in name:
lowerCAmelCase = """num_batches_tracked"""
else:
lowerCAmelCase = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowercase (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase = name.split(""".""" )
lowerCAmelCase = int(items[0] )
lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowerCAmelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowerCAmelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowerCAmelCase = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowerCAmelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def lowercase (snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=None , snake_case__ : Tuple=None , ) -> List[str]:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase = SpeechTaConfig.from_pretrained(snake_case__ )
else:
lowerCAmelCase = SpeechTaConfig()
if task == "s2t":
lowerCAmelCase = config.max_text_positions
lowerCAmelCase = SpeechTaForSpeechToText(snake_case__ )
elif task == "t2s":
lowerCAmelCase = 1_876
lowerCAmelCase = 600
lowerCAmelCase = config.max_speech_positions
lowerCAmelCase = SpeechTaForTextToSpeech(snake_case__ )
elif task == "s2s":
lowerCAmelCase = 1_876
lowerCAmelCase = config.max_speech_positions
lowerCAmelCase = SpeechTaForSpeechToSpeech(snake_case__ )
else:
raise ValueError(f'''Unknown task name: {task}''' )
if vocab_path:
lowerCAmelCase = SpeechTaTokenizer(snake_case__ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase = AddedToken("""<mask>""" , lstrip=snake_case__ , rstrip=snake_case__ )
lowerCAmelCase = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
lowerCAmelCase = SpeechTaFeatureExtractor()
lowerCAmelCase = SpeechTaProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
processor.save_pretrained(snake_case__ )
lowerCAmelCase = torch.load(snake_case__ )
recursively_load_weights(fairseq_checkpoint["""model"""] , snake_case__ , snake_case__ )
model.save_pretrained(snake_case__ )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(snake_case__ )
model.push_to_hub(snake_case__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
a = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
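# Illustrative invocation (hypothetical script name and paths; the flags mirror
# the argparse definitions above):
#     python convert_speecht5_checkpoint.py --task t2s \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --vocab_path /path/to/spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts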
| 529
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowercase (snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ) -> float:
'''simple docstring'''
lowerCAmelCase = a
while True:
lowerCAmelCase = Decimal(snake_case__ ) - (
Decimal(eval(snake_case__ ) ) / Decimal(eval(str(diff(snake_case__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case__ ) ) < precision: # noqa: S307
return float(snake_case__ )
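# The loop above implements the classic Newton-Raphson update,
#     x_{n+1} = x_n - f(x_n) / f'(x_n),
# with sympy.diff supplying f' symbolically and Decimal keeping the division
# numerically stable; iteration stops once |f(x_n)| < precision.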
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find the value of e (root of log(x) - 1 = 0)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
    # Find root of the exponential function exp(x) - 1 = 0
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 529
| 1
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ProphetNetTokenizer
UpperCamelCase = False
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Union[str, Any] , A_ : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(A_ ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase_ = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
lowerCamelCase_ = tokenizer(A_ , padding=A_ , return_tensors='pt' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : int ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
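# Note (added for clarity): unlike BERT, ProphetNet prepends no [CLS] and only
# appends trailing [SEP] tokens (id 102), which is what the two assertions encode:
#     single sequence:  tokens + [SEP]
#     sentence pair:    tokens_a + [SEP] + tokens_b + [SEP]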
| 70
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCamelCase : int = False
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int , A_ : Dict=32 ) -> Any:
"""simple docstring"""
set_seed(0 )
lowerCamelCase_ = UNetaDModel(sample_size=A_ , in_channels=3 , out_channels=3 )
lowerCamelCase_ = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowerCamelCase_ = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=A_ , )
lowerCamelCase_ = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=A_ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowerCamelCase_ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(A_ ) for _ in range(4 )]
lowerCamelCase_ = [torch.randn((4, 3, 32, 32) ).to(A_ ) for _ in range(4 )]
lowerCamelCase_ = [torch.randint(0 , 1000 , (4,) ).long().to(A_ ) for _ in range(4 )]
# train with a DDPM scheduler
lowerCamelCase_ , lowerCamelCase_ = self.get_model_optimizer(resolution=32 )
model.train().to(A_ )
for i in range(4 ):
optimizer.zero_grad()
lowerCamelCase_ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowerCamelCase_ = model(A_ , timesteps[i] ).sample
lowerCamelCase_ = torch.nn.functional.mse_loss(A_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowerCamelCase_ , lowerCamelCase_ = self.get_model_optimizer(resolution=32 )
model.train().to(A_ )
for i in range(4 ):
optimizer.zero_grad()
lowerCamelCase_ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowerCamelCase_ = model(A_ , timesteps[i] ).sample
lowerCamelCase_ = torch.nn.functional.mse_loss(A_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-5 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-5 ) )
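# Background note (added for clarity): DDPM and DDIM share the same closed-form
# forward (noising) process,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so add_noise yields identical training targets for both schedulers; with the
# seeds fixed via set_seed(0), the two runs should agree to the 1e-5 tolerance
# asserted above.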
| 70
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : Optional[Any] = ["pixel_values"]
def __init__( self : str , lowercase : bool = True , lowercase : int = 3_2 , lowercase : List[Any]=PILImageResampling.BILINEAR , lowercase : bool = True , **lowercase : str , ) -> None:
'''simple docstring'''
UpperCamelCase__ = do_resize
UpperCamelCase__ = do_rescale
UpperCamelCase__ = size_divisor
UpperCamelCase__ = resample
super().__init__(**lowercase )
def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : int , lowercase : List[Any] , lowercase : Optional[ChannelDimension] = None , **lowercase : Any ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = get_image_size(lowercase )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase__ = height // size_divisor * size_divisor
UpperCamelCase__ = width // size_divisor * size_divisor
UpperCamelCase__ = resize(lowercase , (new_h, new_w) , resample=lowercase , data_format=lowercase , **lowercase )
return image
def A ( self : int , lowercase : np.ndarray , lowercase : float , lowercase : Optional[ChannelDimension] = None , **lowercase : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(image=lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : int , lowercase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , lowercase : Optional[bool] = None , lowercase : Optional[int] = None , lowercase : Optional[Any]=None , lowercase : Optional[bool] = None , lowercase : Optional[Union[TensorType, str]] = None , lowercase : ChannelDimension = ChannelDimension.FIRST , **lowercase : str , ) -> BatchFeature:
'''simple docstring'''
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
UpperCamelCase__ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(lowercase ) for img in images]
if do_resize:
UpperCamelCase__ = [self.resize(lowercase , size_divisor=lowercase , resample=lowercase ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(lowercase , scale=1 / 2_5_5 ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
UpperCamelCase__ = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
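# Minimal usage sketch (illustrative; the concrete class name is obfuscated
# above, so "processor" stands for an instance of it, called through the
# BaseImageProcessor entry point that routes to the preprocess method above):
#     processor = <ImageProcessorClass>(size_divisor=32)
#     image = PIL.Image.new("RGB", (100, 70))   # width=100, height=70
#     batch = processor(image, return_tensors="np")
#     batch["pixel_values"].shape               # (1, 3, 64, 96): floored to multiples of 32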
| 265
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Any ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
return model
@property
def A ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
UpperCamelCase__ = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def A ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCamelCase__ = DDPMScheduler()
UpperCamelCase__ = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase )
UpperCamelCase__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
UpperCamelCase__ = torch.Generator(device=lowercase ).manual_seed(4_2 )
UpperCamelCase__ = pipe(generator=lowercase , steps=4 )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = torch.Generator(device=lowercase ).manual_seed(4_2 )
UpperCamelCase__ = pipe(generator=lowercase , steps=4 , return_dict=lowercase )
UpperCamelCase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
UpperCamelCase__ = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
UpperCamelCase__ = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCamelCase__ = DDIMScheduler()
UpperCamelCase__ = self.dummy_vqvae_and_unet
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase )
UpperCamelCase__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
UpperCamelCase__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase__ = torch.Generator(device=lowercase ).manual_seed(4_2 )
UpperCamelCase__ = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=1_0 )
UpperCamelCase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
UpperCamelCase__ = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = self.dummy_unet_condition
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase )
UpperCamelCase__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
UpperCamelCase__ = torch.rand((1, 1, 1_0) )
UpperCamelCase__ = pipe(generator=lowercase , encoding=lowercase )
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
UpperCamelCase__ = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def A ( self : int ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = torch_device
UpperCamelCase__ = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
UpperCamelCase__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
UpperCamelCase__ = torch.Generator(device=lowercase ).manual_seed(4_2 )
UpperCamelCase__ = pipe(generator=lowercase )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
UpperCamelCase__ = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
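# Note (added for clarity): the audio-length assertions above follow from the
# mel round trip - a spectrogram image of width W decodes back to
# (W - 1) * hop_length samples, which is exactly the shape both test classes check.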
| 265
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( lowercase__ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : int = LEDTokenizer
lowerCamelCase : Tuple = LEDTokenizerFast
lowerCamelCase : Optional[Any] = True
def __lowercase ( self ) -> Any:
'''simple docstring'''
super().setUp()
__snake_case :Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__snake_case :List[str] = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case :Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__snake_case :Optional[Any] = {"""unk_token""": """<unk>"""}
__snake_case :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __lowercase ( self , **a__ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def __lowercase ( self , **a__ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def __lowercase ( self , a__ ) -> Any:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __lowercase ( self ) -> int:
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__snake_case :Optional[Any] = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__snake_case :List[str] = tokenizer(a__ , max_length=len(a__ ) , padding=a__ , return_tensors="""pt""" )
self.assertIsInstance(a__ , a__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__snake_case :Any = batch.input_ids.tolist()[0]
self.assertListEqual(a__ , a__ )
@require_torch
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__snake_case :Union[str, Any] = tokenizer(a__ , padding=a__ , return_tensors="""pt""" )
self.assertIn("""input_ids""" , a__ )
self.assertIn("""attention_mask""" , a__ )
self.assertNotIn("""labels""" , a__ )
self.assertNotIn("""decoder_attention_mask""" , a__ )
@require_torch
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__snake_case :Union[str, Any] = tokenizer(text_target=a__ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def __lowercase ( self ) -> Any:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__snake_case :Any = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] , padding=a__ , truncation=a__ , return_tensors="""pt""" )
self.assertIsInstance(a__ , a__ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :Tuple = ["""A long paragraph for summarization."""]
__snake_case :Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__snake_case :Union[str, Any] = tokenizer(a__ , return_tensors="""pt""" )
__snake_case :int = tokenizer(text_target=a__ , return_tensors="""pt""" )
__snake_case :Union[str, Any] = inputs["""input_ids"""]
__snake_case :List[Any] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowercase ( self ) -> str:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__snake_case :Dict = ["""Summary of the text.""", """Another summary."""]
__snake_case :Optional[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__snake_case :int = tokenizer(a__ , padding=a__ )
__snake_case :Any = [[0] * len(a__ ) for x in encoded_output["""input_ids"""]]
__snake_case :Dict = tokenizer.pad(a__ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , a__ )
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
pass
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case :List[str] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case :Optional[int] = self.tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case :Dict = """A, <mask> AllenNLP sentence."""
__snake_case :str = tokenizer_r.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
__snake_case :int = tokenizer_p.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__snake_case :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__snake_case :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
a__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
a__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 455
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
SCREAMING_SNAKE_CASE = True
except ImportError:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@staticmethod
def A__ ( lowerCAmelCase ):
UpperCAmelCase_ = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=lowerCAmelCase , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=lowerCAmelCase , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=lowerCAmelCase )
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , *lowerCAmelCase ):
UpperCAmelCase_ = testing
UpperCAmelCase_ = testing_file
UpperCAmelCase_ = path
def A__ ( self ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(lowerCAmelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
UpperCAmelCase_ = (
Path(lowerCAmelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCAmelCase ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(lowerCAmelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCAmelCase , extra_context=lowerCAmelCase , )
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(lowerCAmelCase )
UpperCAmelCase_ = configuration["lowercase_modelname"]
UpperCAmelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'''{directory}/configuration.json''' )
UpperCAmelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowerCAmelCase )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(lowerCAmelCase ):
with open(lowerCAmelCase , "r" ) as f:
UpperCAmelCase_ = f.readlines()
with open(lowerCAmelCase , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCAmelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# Create temp file
UpperCAmelCase_ , UpperCAmelCase_ = mkstemp()
UpperCAmelCase_ = False
with fdopen(lowerCAmelCase , "w" ) as new_file:
with open(lowerCAmelCase ) as old_file:
for line in old_file:
new_file.write(lowerCAmelCase )
if line_to_copy_below in line:
UpperCAmelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCAmelCase )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowerCAmelCase , lowerCAmelCase )
# Remove original file
remove(lowerCAmelCase )
# Move new file
move(lowerCAmelCase , lowerCAmelCase )
def skip_units(lowerCAmelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCAmelCase ):
with open(lowerCAmelCase ) as datafile:
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(lowerCAmelCase )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(lowerCAmelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase_ = []
elif "##" not in line:
lines_to_copy.append(lowerCAmelCase )
remove(lowerCAmelCase )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowerCAmelCase )
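# Illustrative invocations (the command is deprecated, as the warning above notes):
#     transformers-cli add-new-model
#     transformers-cli add-new-model --testing --testing_file config.json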
| 579
| 0
|
from collections import defaultdict
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
snake_case : List[Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(SCREAMING_SNAKE_CASE_ ) )
]
snake_case : Optional[Any] = defaultdict(SCREAMING_SNAKE_CASE_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
snake_case : List[Any] = (1 << len(SCREAMING_SNAKE_CASE_ )) - 1
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
        # if mask == self.final_mask, all persons have been assigned a task; return 1
if mask == self.final_mask:
return 1
        # if tasks are exhausted before every person has been assigned one, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task to anyone
snake_case : int = self.count_ways_until(SCREAMING_SNAKE_CASE_ ,task_no + 1 )
        # now try assigning this task to each eligible person and recursively
        # assign the remaining tasks
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) ,task_no + 1 )
# save the value.
snake_case : Union[str, Any] = total_ways_util
return self.dp[mask][task_no]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
# Store the list of persons for each task
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
for j in task_performed[i]:
self.task[j].append(SCREAMING_SNAKE_CASE_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 ,1 )
if __name__ == "__main__":
snake_case__ : str = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
snake_case__ : Optional[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
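# For the inputs above (tasks 1..5; persons able to perform [1, 3, 4], [1, 2, 5]
# and [3, 4]), enumerating the valid assignments gives 10 distinct ways, so this
# script should print 10.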
| 705
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowercase ( __A : bytes , __A : int ) -> np.array:
'''simple docstring'''
snake_case : List[str] = f"""{sampling_rate}"""
snake_case : Union[str, Any] = """1"""
snake_case : List[str] = """f32le"""
snake_case : Optional[Any] = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(__A , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
snake_case : str = ffmpeg_process.communicate(__A )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
snake_case : int = output_stream[0]
snake_case : Tuple = np.frombuffer(__A , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def lowercase ( __A : int , __A : float , __A : str = "f32le" , ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = f"""{sampling_rate}"""
snake_case : int = """1"""
if format_for_conversion == "s16le":
snake_case : Dict = 2
elif format_for_conversion == "f32le":
snake_case : Optional[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
snake_case : Dict = platform.system()
if system == "Linux":
snake_case : List[str] = """alsa"""
snake_case : Union[str, Any] = """default"""
elif system == "Darwin":
snake_case : Optional[int] = """avfoundation"""
snake_case : str = """:0"""
elif system == "Windows":
snake_case : List[str] = """dshow"""
snake_case : Union[str, Any] = """default"""
snake_case : Union[str, Any] = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
snake_case : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
snake_case : Optional[Any] = _ffmpeg_stream(__A , __A )
for item in iterator:
yield item
def lowercase ( __A : int , __A : float , __A : Optional[int] = None , __A : Optional[Union[Tuple[float, float], float]] = None , __A : str = "f32le" , ) -> Optional[Any]:
'''simple docstring'''
if stream_chunk_s is not None:
snake_case : List[str] = stream_chunk_s
else:
snake_case : Tuple = chunk_length_s
snake_case : Optional[Any] = ffmpeg_microphone(__A , __A , format_for_conversion=__A )
if format_for_conversion == "s16le":
snake_case : List[Any] = np.intaa
snake_case : Dict = 2
elif format_for_conversion == "f32le":
snake_case : List[Any] = np.floataa
snake_case : Optional[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
snake_case : Tuple = chunk_length_s / 6
snake_case : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__A , (int, float) ):
snake_case : int = [stride_length_s, stride_length_s]
snake_case : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
snake_case : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
snake_case : str = datetime.datetime.now()
snake_case : Tuple = datetime.timedelta(seconds=__A )
for item in chunk_bytes_iter(__A , __A , stride=(stride_left, stride_right) , stream=__A ):
# Put everything back in numpy scale
snake_case : List[str] = np.frombuffer(item["""raw"""] , dtype=__A )
snake_case : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
snake_case : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
            # We're running late; skip this chunk
continue
yield item
def lowercase ( __A : Optional[Any] , __A : int , __A : Tuple[int, int] , __A : bool = False ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = b""""""
snake_case , snake_case : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
snake_case : List[Any] = 0
for raw in iterator:
acc += raw
if stream and len(__A ) < chunk_len:
snake_case : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__A ) >= chunk_len:
# We are flushing the accumulator
snake_case : str = (_stride_left, stride_right)
snake_case : str = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
snake_case : Optional[Any] = False
yield item
snake_case : int = stride_left
snake_case : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__A ) > stride_left:
snake_case : Dict = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
snake_case : Tuple = False
yield item
def lowercase ( __A : Optional[int] , __A : int ) -> List[str]:
'''simple docstring'''
    snake_case : List[str] = 2**24 # 16 MB
try:
with subprocess.Popen(__A , stdout=subprocess.PIPE , bufsize=__A ) as ffmpeg_process:
while True:
snake_case : Union[str, Any] = ffmpeg_process.stdout.read(__A )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 315
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RetriBERT tokenizer (backed by the tokenizers library), identical to BertTokenizerFast."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS] and [SEP] around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type IDs: 0s for the first sequence (plus specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
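# Illustrative note (added, not in the original file): for a sequence pair, the token
# type ids mark `[CLS] A [SEP]` with 0s and `B [SEP]` with 1s. With token_ids_0=[5, 6]
# and token_ids_1=[7], `create_token_type_ids_from_sequences` returns:
#
#   [0, 0, 0, 0, 1, 1]   # [CLS] 5 6 [SEP] | 7 [SEP]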
| 535
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
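# Hedged usage sketch (added for illustration): the three encoder depths are exposed
# per modality through `num_hidden_layers`.
#
#   config = LxmertConfig(l_layers=2, x_layers=1, r_layers=2)
#   config.num_hidden_layers  # {'vision': 2, 'cross_encoder': 1, 'language': 2}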
| 335
| 0
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
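# Hedged sketch (not part of the test file) of the sampling pattern exercised above,
# using the public checkpoint referenced in the slow tests:
#
#   unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]  # two-step sampling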
| 703
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 95
|
"""simple docstring"""
def A ( __snake_case: int ) -> int:
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__magic_name__ = 1
__magic_name__ = 1
while repunit:
__magic_name__ = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def A ( __snake_case: int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
__magic_name__ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__snake_case ) <= limit:
divisor += 2
return divisor
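
# Worked example (added for illustration): modulo 7, the repunits 1, 11, ..., 111111
# first reach 0 at length 6 (111111 = 7 * 15873), so:
assert least_divisible_repunit(7) == 6
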
if __name__ == "__main__":
print(f"""{solution() = }""")
| 545
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, loading without trusting it should fail.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 99
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on the interval [a, b] by repeated halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
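    # Extra usage sketch (added, not in the original file): the same routine can
    # approximate sqrt(2) as the root of x**2 - 2 on [1, 2].
    print(bisection(lambda x: x**2 - 2, 1, 2))  # ~1.4142135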
| 99
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 329
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing each stack operation."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 329
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with DDIM, wrapping a `unet` denoiser and a
    `DDIMScheduler` (any compatible scheduler config is converted to DDIM).
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
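# Hedged usage sketch (not part of the pipeline file): DDIMPipeline is typically loaded
# from a pretrained DDPM/DDIM checkpoint; "google/ddpm-cifar10-32" is one public example.
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]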
| 700
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer, backed by SentencePiece plus a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        # BARTpho does not use token type ids, so this returns all zeros
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
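# Hedged usage sketch (added, not in the original file), loading the public checkpoint
# named in PRETRAINED_VOCAB_FILES_MAP above:
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]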
| 514
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 514
| 1
|
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the first (shortest) path found from `start` to `goal`, breadth-first."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path from `start` to `target`, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
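    # Extra worked example (added for illustration) on the same demo graph: the first
    # path found from "A" to "F" goes through "C".
    assert bfs_shortest_path(demo_graph, "A", "F") == ["A", "C", "F"]
    assert bfs_shortest_path_distance(demo_graph, "A", "F") == 2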
| 382
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that can form between 1 and `n_limit` square laminae."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
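    # Small illustrative check (added): with t_limit=8, only t = 8 tiles can form a
    # square lamina (a 3x3 square with a 1x1 hole uses 3*3 - 1*1 = 8 tiles), so:
    assert solution(t_limit=8) == 1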
| 382
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 33
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
snake_case : List[str] = True
except ImportError:
snake_case : Optional[Any] = False
snake_case : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowercase ( __lowerCAmelCase : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class snake_case_ (lowerCamelCase_ ):
@staticmethod
def lowerCamelCase__( __snake_case :ArgumentParser ) -> Optional[Any]:
a__ = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=__snake_case ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=__snake_case ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=__snake_case )
def __init__( self :Optional[int] ,__snake_case :bool ,__snake_case :str ,__snake_case :Dict=None ,*__snake_case :Optional[int] ) -> Dict:
a__ = testing
a__ = testing_file
a__ = path
def lowerCamelCase__( self :List[Any] ) -> Any:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
a__ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(__snake_case ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
else:
            with open(self._testing_file ,'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=True ,extra_context=testing_configuration ,)
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
        with open(directory + '/configuration.json' ,'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir ,exist_ok=True )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=True )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(__snake_case :Tuple ):
with open(__snake_case ,'r' ) as f:
                lines = f.readlines()
with open(__snake_case ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file_path :str ,line_to_copy_below :str ,lines_to_copy :List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh ,'w' ) as new_file:
                with open(original_file_path ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file_path ,abs_path )
            # Remove original file
            remove(original_file_path )
            # Move new file
            move(abs_path ,original_file_path )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in ,line_to_copy_below ,lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory )
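# --- Illustrative sketch (added for clarity, not part of the original command) ---
# `replace`/`replace_in_files` above implement a small marker protocol: lines
# collected between "# Below: ..." and "# End." are written into the target
# file right after the line named by the marker. A minimal in-memory version
# of that insertion step (names here are hypothetical):
def insert_below(text: str, anchor: str, new_lines: list) -> str:
    out = []
    for line in text.splitlines(keepends=True):
        out.append(line)
        if anchor in line:
            out.extend(l if l.endswith("\n") else l + "\n" for l in new_lines)
    return "".join(out)

# insert_below("a\nanchor\nb\n", "anchor", ["x"])  ->  "a\nanchor\nx\nb\n"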
| 335
| 0
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
    batch_size = 10000
    columns = None
    features = None
class _snake_case ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
def lowerCAmelCase_ ( self ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
    def lowerCAmelCase_ ( self ,dl_manager ) -> str:
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files ,(str, list, tuple) ):
            files = data_files
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file ,"rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name ,gen_kwargs={"files": files} ) )
return splits
    def _cast_table ( self ,pa_table ) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table ,self.info.features.arrow_schema )
return pa_table
    def lowerCAmelCase_ ( self ,files ) -> Optional[int]:
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file ,"rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                    raise
raise
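# --- Illustrative sketch (added for clarity, not part of the original builder) ---
# The generator above boils down to this pyarrow pattern: stream record
# batches out of each file and wrap each one in its own table. A stand-alone
# version, with the file path left to the caller:
import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path, batch_size=10000, columns=None):
    """Yield one pa.Table per record batch, mirroring the builder's inner loop."""
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
        yield pa.Table.from_batches([record_batch])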
| 700
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance ( a , b ) -> Any:
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize ( x , clusters ) -> Any:
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
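# --- Illustrative sanity check (added for clarity, not part of the original module) ---
# `color_quantize` flattens an image to (n_pixels, 3) and assigns every pixel
# the index of its nearest cluster under the squared-euclidean distance above:
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pixels = rng.random((4, 4, 3))    # a tiny fake image
    clusters = rng.random((8, 3))     # 8 hypothetical color clusters
    ids = color_quantize(pixels, clusters)
    assert ids.shape == (16,) and ids.max() < 8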
class _snake_case ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self ,clusters = None ,do_resize = True ,size = None ,resample = PILImageResampling.BILINEAR ,do_normalize = True ,do_color_quantize = True ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize ( self ,image ,size ,resample = PILImageResampling.BILINEAR ,data_format = None ,**kwargs ,) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image ,size=(size["height"], size["width"]) ,resample=resample ,data_format=data_format ,**kwargs )
    def normalize ( self ,image ,data_format = None ,) -> np.ndarray:
        image = rescale(image=image ,scale=1 / 127.5 ,data_format=data_format )
        image = image - 1
        return image
    def preprocess ( self ,images ,do_resize = None ,size = None ,resample = None ,do_normalize = None ,do_color_quantize = None ,clusters = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters ) if clusters is not None else None
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image ,ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images ,clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size ,-1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
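# --- Illustrative usage sketch (added for clarity, not part of the original file;
# the random clusters below are a stand-in for the real color codebook) ---
if __name__ == "__main__":
    processor = _snake_case(clusters=np.random.rand(8, 3))
    dummy = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    batch = processor(images=dummy, return_tensors="np")  # -> {"input_ids": (1, 256 * 256)}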
| 57
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
def _UpperCAmelCase ( self : List[str] ):
return self._get_superresolution_dummy_components()
    def _UpperCAmelCase ( self : List[Any] , device : List[Any] , seed : str=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCAmelCase ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _UpperCAmelCase ( self : int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _UpperCAmelCase ( self : Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _UpperCAmelCase ( self : Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _UpperCAmelCase ( self : Optional[int] ):
self._test_save_load_local()
def _UpperCAmelCase ( self : Tuple ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 452
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_lowercase : Optional[int] =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoConfig.from_pretrained(*args ,**kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoTokenizer.from_pretrained(*args ,**kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoModel.from_pretrained(*args ,**kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args ,**kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args ,**kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args ,**kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def _SCREAMING_SNAKE_CASE ( *args ,**kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args ,**kwargs )
| 364
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( lowercase__ ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """ViltImageProcessor"""
snake_case_ = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self : Union[str, Any] , image_processor : Dict=None , tokenizer : str=None , **kwargs : Any ) -> Optional[Any]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self : Dict , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Any , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def _lowerCamelCase ( self : Any , *args : str , **kwargs : int ) -> Any:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def _lowerCamelCase ( self : List[str] , *args : Optional[Any] , **kwargs : Union[str, Any] ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )
@property
def _lowerCamelCase ( self : Union[str, Any] ) -> Any:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCamelCase ( self : Dict ) -> Union[str, Any]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : str ) -> List[Any]:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
return self.image_processor
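# --- Illustrative usage sketch (added for clarity, not part of the original file) ---
# The processor just fans out to its two sub-components and merges the results
# ("dandelin/vilt-b32-mlm" is one public checkpoint, used purely as an example):
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
# encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")
# # -> input_ids/attention_mask from the tokenizer plus pixel_values (+ pixel_mask)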
| 227
|
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__UpperCamelCase : Any = get_logger(__name__)
class _PatchedModuleObj :
    def __init__( self : Optional[int] , module : Union[str, Any] , attrs : List[Any]=None ) -> str:
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class UpperCAmelCase_ :
snake_case_ = []
    def __init__( self : str , obj : int , target : str , new : List[str] , attrs : Union[str, Any]=None ) -> str:
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []
def __enter__( self : Any ) -> List[str]:
        *submodules , target_attr = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self : int , *_lowercase : Dict ) -> str:
for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
def _lowerCamelCase ( self : List[Any] ) -> List[str]:
self.__enter__()
self._active_patches.append(self )
def _lowerCamelCase ( self : List[str] ) -> Tuple:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
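# --- Illustrative usage sketch (added for clarity, not part of the original module;
# the patcher class keeps its scrambled name `UpperCAmelCase_` here) ---
import os
from types import SimpleNamespace

_demo = SimpleNamespace(join=os.path.join)  # "from os.path import join"-style re-export

def _fake_join(*parts):
    return "/".join(parts)

with UpperCAmelCase_(_demo, "os.path.join", _fake_join):
    assert _demo.join("a", "b") == "a/b"                 # patched inside the block
assert _demo.join("a", "b") == os.path.join("a", "b")    # restored on exit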
| 227
| 1
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
lowercase__ = mock.Mock()
lowercase__ = 500
lowercase__ = {}
lowercase__ = HTTPError
lowercase__ = {}
# Download this model to make sure it's in the cache.
lowercase__ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''', return_value=lowerCamelCase ) as mock_head:
lowercase__ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowercase__ ( self : int ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
lowercase__ = mock.Mock()
lowercase__ = 500
lowercase__ = {}
lowercase__ = HTTPError
lowercase__ = {}
# Download this model to make sure it's in the cache.
lowercase__ = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''', return_value=lowerCamelCase ) as mock_head:
lowercase__ = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, '''wb''' ) as f:
                http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', f )
            tokenizer = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''', '''wb''' ) as f:
                http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', f )
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase__ = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def lowercase__ ( cls : Tuple ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def lowercase__ ( cls : List[str] ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('''test-tokenizer''', use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token, repo_id='''test-tokenizer''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='''test-tokenizer''', push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('''valid_org/test-tokenizer-org''', use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-tokenizer-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-tokenizer-org''', push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def lowercase__ ( self : int ):
'''simple docstring'''
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""", trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, '''vocab.txt''' )
            with open(vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""", trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizerFast''' )
        tokenizer = AutoTokenizer.from_pretrained(
            F"""{USER}/test-dynamic-tokenizer""", use_fast=False, trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ), ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ), ['''BC''', '''A'''] )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ), ['''AB''', '''C'''] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ), ['''ABC''', '''D'''] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
# Even if the offsets are wrong, we necessarily output correct string
# parts.
lowercase__ = Trie()
        parts = trie.cut_text('''ABC''', [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts, ['''AB''', '''C'''] )
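# --- Illustrative sketch (added for clarity, not part of the original tests) ---
# The Trie under test stores one nested dict per character and marks word ends
# with the "" key, which is exactly what the assertions above spell out:
def trie_add(data: dict, word: str) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1

# d = {}; trie_add(d, "ab")  ->  {"a": {"b": {"": 1}}}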
| 183
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def lowercase__ ( self : Any, model : Optional[Any], tokenizer : Any, processor : Tuple ):
        '''simple docstring'''
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor )
        # test with a raw waveform
        audioa = np.zeros((34_000,) )
        audio = np.zeros((14_000,) )
        return audio_classifier, [audioa, audio]
    def lowercase__ ( self : str, audio_classifier : Dict, examples : Union[str, Any] ):
        '''simple docstring'''
        audioa , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output, [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ], )
        output = audio_classifier(audio, top_k=1 )
        self.assertEqual(
            output, [
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ], )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio ( self : Optional[int], audio_classifier : List[Any] ):
        '''simple docstring'''
        import datasets
        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio )
        self.assertEqual(
            output, [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ], )
@require_torch
def lowercase__ ( self : int ):
'''simple docstring'''
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''', model=model )
        audio = np.ones((8_000,) )
        output = audio_classifier(audio, top_k=4 )
        EXPECTED_OUTPUT = [
            {'''score''': 0.0842, '''label''': '''no'''},
            {'''score''': 0.0838, '''label''': '''up'''},
            {'''score''': 0.0837, '''label''': '''go'''},
            {'''score''': 0.0834, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0845, '''label''': '''stop'''},
            {'''score''': 0.0844, '''label''': '''on'''},
            {'''score''': 0.0841, '''label''': '''right'''},
            {'''score''': 0.0834, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {'''array''': np.ones((8_000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4 )
        self.assertIn(nested_simplify(output, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase__ ( self : List[str] ):
'''simple docstring'''
import datasets
        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''', model=model )
        dataset = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''' )
        audio = np.array(dataset[3]['''speech'''], dtype=np.float32 )
        output = audio_classifier(audio, top_k=4 )
        self.assertEqual(
            nested_simplify(output, decimals=3 ), [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
], )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
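# --- Illustrative usage sketch (added for clarity, not part of the original tests) ---
# Outside the test harness the pipeline is driven the same way: a 1-D float
# waveform in, a top_k-sorted list of {"score", "label"} dicts out:
# import numpy as np
# from transformers import pipeline
# audio_classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
# preds = audio_classifier(np.zeros((16_000,), dtype=np.float32), top_k=2)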
| 183
| 1
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
    def __init__( self : Dict , parent , batch_size : int=13 , num_channels : List[str]=3 , is_training : Any=True , use_labels : Union[str, Any]=True , hidden_dropout_prob : Any=0.1 , attention_probs_dropout_prob : Optional[int]=0.1 , image_size : List[Any]=2_24 , num_labels : Any=10_00 , layer_depths : Tuple=[3, 3, 6, 4] , embed_dims : Union[str, Any]=[48, 56, 1_12, 2_20] , ) -> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase_ , layer_scale_init_value=1e-5 , )
    def create_and_check_model ( self : int , config : Tuple , pixel_values : List[str] , labels : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification ( self : Any , config : List[str] , pixel_values : List[str] , labels : Optional[Any] ) -> List[str]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        (config , pixel_values , labels) = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class A ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict : Any , config : Union[str, Any] , model_class : List[Any] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
        def _config_zero_init(config : Optional[Any] ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
pass
def prepare_img ():
'''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self : Tuple ) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
        model = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 706
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf (model : BertModel , ckpt_dir : str , model_name : str ):
'''simple docstring'''
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name : str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'bert/{name}'
    def create_tf_var(tensor : np.ndarray , name : str , session : tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def main (raw_args : Tuple=None ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
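# --- Illustrative CLI usage (added for clarity; flag names come from the argparse
# definitions above, the script file name is hypothetical) ---
# python convert_bert_pytorch_checkpoint_to_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt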
| 377
| 0
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB: str = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
        input_text = """this is a test"""
        output_text = """this is a test"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
        token = """<pad>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
        self.assertEqual(len(vocab_keys ) , 30000 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1289] )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Any = AlbertTokenizer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : Dict = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
    def test_tokenizer_integration( self ) -> None:
        '''simple docstring'''
        # fmt: off
UpperCAmelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `UpperCAmelCase` still names the expected-encoding dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 160
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output( self , result ):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        '''simple docstring'''
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        '''simple docstring'''
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        '''simple docstring'''
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config( self ) -> None:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_deberta_model( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> None:
        '''simple docstring'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm( self ) -> None:
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head( self ) -> None:
        '''simple docstring'''
        model = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
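# Minimal sketch of the slice-comparison pattern used by the integration test
# above: take a 3x3 window of the hidden states and compare it to stored
# reference values with an absolute tolerance. The tensors here are
# illustrative placeholders, not real DeBERTa activations.
if is_torch_available():
    _out = torch.zeros(1, 11, 8)
    _ref = _out[:, 1:4, 1:4].clone()
    assert torch.allclose(_out[:, 1:4, 1:4], _ref, atol=1E-4)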
| 160
| 1
|
import numpy as np
import qiskit
def bbaa( key_len: int = 8 , seed: int | None = None ) -> str:
    '''simple docstring'''
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name='''BB84''' )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('''aer_simulator''' )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''''''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , '''0''' )
    return key
if __name__ == "__main__":
print(F'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
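    # Why 6 * key_len qubits are prepared above: Alice's and Bob's random bases
    # agree with probability 1/2, so roughly half of the raw bits survive the
    # sifting step; the 6x head-room makes the ljust('0') fallback very
    # unlikely to trigger. A quick classical sanity check of the sifting rate:
    rng_check = np.random.default_rng(seed=0)
    a_basis = rng_check.integers(2, size=48)
    b_basis = rng_check.integers(2, size=48)
    assert 0.25 < (a_basis == b_basis).mean() < 0.75  # close to 1/2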
| 452
|
import requests
from bs4 import BeautifulSoup
def get_citation( base_url: str , params: dict ) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
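    # The `params` dict above is URL-encoded into the query string by requests.
    # A dependency-free sketch of the same encoding, for illustration only:
    from urllib.parse import urlencode
    assert urlencode({'''hl''': '''en''', '''year''': 2018}) == '''hl=en&year=2018'''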
| 452
| 1
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results( test_results: str ):
    """simple docstring"""
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure( failures_short_lines: str ):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n" ):
        if re.search(r"_ \[doctest\]" , line ):
            in_error = True
            file = line.split(" " )[2]
        elif in_error and not line.split(" " )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__( self , title: str , doc_test_results: Dict ) -> None:
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split("," )[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self ) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
    @property
    def header( self ) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def failures( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def category_failures( self ) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
    @property
    def payload( self ) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
    @staticmethod
    def error_out( ) -> None:
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowercase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowercase , )
def __lowercase ( self : Optional[int] ) -> str:
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
snake_case : Dict = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else "All tests passed."
snake_case : Dict = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_lowercase , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ) -> None:
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        job_link = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                text = F'''*Num failures* :{len(job_result["failed"] )} \n'''
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'''Results for {job}''' , blocks=_lowercase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links( ):
    """simple docstring"""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )
        return {}
def retrieve_artifact( name: str ):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split("." )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'''Could not open {os.path.join(name , file )}.''' ) from e
    return _artifact
def retrieve_available_artifacts( ):
    """simple docstring"""
    class Artifact:
        def __init__( self , name: str ) -> None:
            self.name = name
            self.paths = []
        def __str__( self ) -> str:
            return self.name
        def add_path( self , path: str ) -> None:
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path, test = line.split('::')
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] = failure
                        break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
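    # Minimal sketch of the Slack Block Kit payload shape assembled above: a
    # header block plus a mrkdwn section block. The text content is illustrative.
    _example_blocks = [
        {"type": "header", "text": {"type": "plain_text", "text": "Doc tests"}},
        {"type": "section", "text": {"type": "mrkdwn", "text": "*3* failures"}},
    ]
    assert json.loads(json.dumps({"blocks": _example_blocks}))["blocks"][0]["type"] == "header"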
| 449
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed( self ) -> int:
        return 12
    @property
    def num_embeds_ada_norm( self ) -> int:
        return 12
    @property
    def text_embedder_hidden_size( self ) -> int:
        return 32
    @property
    def dummy_vqvae( self ) -> VQModel:
        torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
    @property
    def dummy_tokenizer( self ) -> CLIPTokenizer:
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
    @property
    def dummy_text_encoder( self ) -> CLIPTextModel:
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    @property
    def dummy_transformer( self ) -> Transformer2DModel:
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion( self ) -> None:
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="np" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="np" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling( self ) -> None:
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="np" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="np" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling( self ) -> None:
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 449
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
_A = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor( LayoutLMv2ImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 507
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_A = TypeVar("""_T""")
class Queue( Generic[_T] ):
    def __init__( self , iterable: Iterable[_T] | None = None ) -> None:
        """simple docstring"""
        self._stack1: list[_T] = list(iterable or [] )
        self._stack2: list[_T] = []
    def __len__( self ) -> int:
        """simple docstring"""
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__( self ) -> str:
        """simple docstring"""
        return f"""Queue({tuple(self._stack2[::-1] + self._stack1 )})"""
    def put( self , item: _T ) -> None:
        """simple docstring"""
        self._stack1.append(item )
    def get( self ) -> _T:
        """simple docstring"""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError("""Queue is empty""" )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
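    # Usage sketch for the two-stack queue above: each element is moved between
    # the stacks at most once, so get() is amortized O(1).
    queue = Queue([1, 2, 3])
    queue.put(4)
    assert queue.get() == 1 and queue.get() == 2
    assert len(queue) == 2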
| 507
| 1
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """simple docstring"""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    """simple docstring"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> None:
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + ".source" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + ".target" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ) -> int:
        return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("\n" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("\n" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , "right" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , "right" )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch] )
        masks = torch.stack([x["attention_mask"] for x in batch] )
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list( summary_ids: List[List] ):
    """simple docstring"""
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path: str ) -> None:
    """simple docstring"""
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    """simple docstring"""
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    """simple docstring"""
    with open(path ) as f:
        return json.load(f )
def get_git_info( ):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f: Callable , x: Iterable ) -> List:
    """simple docstring"""
    return list(map(f , x ) )
def pickle_save( obj , path ):
    """simple docstring"""
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s: str ) -> str:
    """simple docstring"""
    def remove_articles( text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )
    def white_space_fix( text ):
        return " ".join(text.split() )
    def remove_punc( text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower( text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction: str , ground_truth: str ) -> float:
    """simple docstring"""
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score( prediction: str , ground_truth: str ) -> bool:
    """simple docstring"""
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns: List[str] , reference_lns: List[str] ) -> Dict:
    """simple docstring"""
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix: str ) -> bool:
    """simple docstring"""
    return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
    """simple docstring"""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
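# Worked example for the answer-normalization + F1 helpers above: articles and
# punctuation are stripped and text lowercased before token overlap is scored,
# so these two strings count as an exact match.
assert normalize_answer("The cat sat!") == "cat sat"
assert exact_match_score("The cat sat!", "a CAT sat") is True
assert f1_score("The cat sat!", "a CAT sat") == 1.0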
| 657
|
"""simple docstring"""
def base16_encode( data: bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ) -> bytes:
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
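    # Round-trip example for the helpers above (uppercase base16 per RFC 3548):
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"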
| 657
| 1
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    '''simple docstring'''
    def __init__( self ) -> None:
        """simple docstring"""
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("""bits""" , self.watermark )
    def apply_watermark( self , images: torch.FloatTensor ):
        """simple docstring"""
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , """dwtDct""" ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
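# Self-check for the constants above: the bit list is just the binary expansion
# of WATERMARK_MESSAGE, so joining the bits reproduces the 48-bit integer.
assert int("".join(str(bit) for bit in WATERMARK_BITS), 2) == WATERMARK_MESSAGE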
| 247
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
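# Minimal sketch of the lazy-import pattern used above: attribute access on the
# module is deferred until first use, so torch-only symbols never import eagerly.
# `_DemoLazy` is a toy stand-in for transformers' _LazyModule, not its real API.
class _DemoLazy:
    def __init__(self, loaders):
        self._loaders = loaders
    def __getattr__(self, name):
        return self._loaders[name]()  # the import would happen here, on demand

assert _DemoLazy({"NezhaModel": lambda: "loaded"}).NezhaModel == "loaded"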
| 247
| 1
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
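# Worked examples for the two helpers above: bytes_to_unicode yields a
# reversible 256-entry byte-to-unicode map, and get_pairs lists the adjacent
# symbol bigrams that BPE merge ranking operates on.
assert len(bytes_to_unicode()) == 256
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}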
class LEDTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Dict = []
for token in re.findall(self.pat , lowerCamelCase_ ):
lowerCamelCase : str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , __A ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase_ )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : int = "".join(lowerCamelCase_ )
lowerCamelCase : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase : Tuple = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : Optional[int] = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + "\n" )
lowerCamelCase : Union[str, Any] = 0
with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : __A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase : Any = token_index
writer.write(" ".join(lowerCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase : Union[str, Any] = [self.cls_token_id]
lowerCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , __A , __A = None , __A = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
lowerCamelCase : Dict = [self.sep_token_id]
lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __A , __A=False , **__A ):
"""simple docstring"""
lowerCamelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
lowerCamelCase : Union[str, Any] = " " + text
return (text, kwargs)
def _snake_case ( self , __A , __A = None , __A = PaddingStrategy.DO_NOT_PAD , __A = None , __A = None , ):
"""simple docstring"""
lowerCamelCase : List[str] = super()._pad(
encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase : Optional[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase : Optional[Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
lowerCamelCase : Optional[Any] = len(lowerCamelCase_ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase : List[str] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase : Dict = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
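# --- Hedged illustration (added; not part of the snippet above) ---
# The `_pad` override above extends `global_attention_mask` with -1 because,
# for LED/Longformer-style models, 0 already means "local attention" and 1
# means "global attention", so padded positions need a third value.
# `pad_global_attention_mask` is a hypothetical standalone helper assuming
# right-side padding:
def pad_global_attention_mask(mask: list, target_length: int) -> list:
    difference = target_length - len(mask)
    return mask + [-1] * difference

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]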
| 340
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]):
    """simple docstring"""
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
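# --- Hedged usage checks (added for illustration; not in the original) ---
# Odd combined length picks the middle element; even length averages the two
# middle elements.
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5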
| 617
| 0
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Optional[Any]:
_lowercase = model.config
_lowercase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowercase = MBartConfig(
is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=snake_case__ , add_final_layer_norm=snake_case__ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict ) -> Any:
if "encoder.model" in name:
_lowercase = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
_lowercase = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
_lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
_lowercase = '''encoder.''' + name
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
_lowercase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
_lowercase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowercase = '''encoder.layernorm.bias'''
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :int ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
_lowercase = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
_lowercase = key.split('.' )
_lowercase = int(key_split[3] )
_lowercase = int(key_split[5] )
_lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowercase = val[:dim, :]
_lowercase = val[dim : dim * 2, :]
_lowercase = val[-dim:, :]
else:
_lowercase = val[:dim]
_lowercase = val[dim : dim * 2]
_lowercase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowercase = val
return orig_state_dict
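# --- Hedged illustration (added): the qkv split performed above ---
# A fused qkv projection weight of shape (3 * dim, in_dim) is sliced along
# dim 0 into equal query/key/value chunks; 1-D biases split the same way.
# The tensors below are hypothetical, chosen only to show the slicing:
_dim = 4
_fused = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)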
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any]=None , snake_case__ :Any=False ) -> List[Any]:
_lowercase = DonutModel.from_pretrained(snake_case__ ).eval()
# load HuggingFace model
_lowercase = get_configs(snake_case__ )
_lowercase = DonutSwinModel(snake_case__ )
_lowercase = MBartForCausalLM(snake_case__ )
_lowercase = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
model.eval()
_lowercase = original_model.state_dict()
_lowercase = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# verify results on scanned document
_lowercase = load_dataset('hf-internal-testing/example-documents' )
_lowercase = dataset['''test'''][0]['''image'''].convert('RGB' )
_lowercase = XLMRobertaTokenizerFast.from_pretrained(snake_case__ , from_slow=snake_case__ )
_lowercase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowercase = DonutProcessor(snake_case__ , snake_case__ )
_lowercase = processor(snake_case__ , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowercase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowercase = '''When is the coffee break?'''
_lowercase = task_prompt.replace('{user_input}' , snake_case__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowercase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowercase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_lowercase = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowercase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowercase = '''hello world'''
else:
raise ValueError('Model name not supported' )
_lowercase = original_model.decoder.tokenizer(snake_case__ , add_special_tokens=snake_case__ , return_tensors='pt' )[
'''input_ids'''
]
_lowercase = original_model.encoder.model.patch_embed(snake_case__ )
_lowercase = model.encoder.embeddings(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
# verify encoder hidden states
_lowercase = original_model.encoder(snake_case__ )
_lowercase = model.encoder(snake_case__ ).last_hidden_state
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-2 )
# verify decoder hidden states
_lowercase = original_model(snake_case__ , snake_case__ , snake_case__ ).logits
_lowercase = model(snake_case__ , decoder_input_ids=snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
snake_case = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 718
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class A_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
SCREAMING_SNAKE_CASE_ : Dict = numpy.vectorize(lambda UpperCAmelCase : x % 3_6 )
SCREAMING_SNAKE_CASE_ : List[Any] = numpy.vectorize(UpperCAmelCase )
def __init__( self : Optional[Any] ,__A : numpy.ndarray ) -> None:
_lowercase = self.modulus(__A ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_lowercase = encrypt_key.shape[0]
def __UpperCAmelCase ( self : Tuple ,__A : str ) -> int:
return self.key_string.index(__A )
def __UpperCAmelCase ( self : Optional[int] ,__A : int ) -> str:
return self.key_string[round(__A )]
def __UpperCAmelCase ( self : str ) -> None:
_lowercase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowercase = det % len(self.key_string )
_lowercase = len(self.key_string )
if greatest_common_divisor(__A ,len(self.key_string ) ) != 1:
_lowercase = (
F"""determinant modular {req_l} of encryption key({det}) """
F"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(__A )
def __UpperCAmelCase ( self : Any ,__A : str ) -> str:
_lowercase = [char for char in text.upper() if char in self.key_string]
_lowercase = chars[-1]
while len(__A ) % self.break_key != 0:
chars.append(__A )
return "".join(__A )
def __UpperCAmelCase ( self : Optional[int] ,__A : str ) -> str:
_lowercase = self.process_text(text.upper() )
_lowercase = ''
for i in range(0 ,len(__A ) - self.break_key + 1 ,self.break_key ):
_lowercase = text[i : i + self.break_key]
_lowercase = [self.replace_letters(__A ) for char in batch]
_lowercase = numpy.array([vec] ).T
_lowercase = self.modulus(self.encrypt_key.dot(__A ) ).T.tolist()[
0
]
_lowercase = ''.join(
self.replace_digits(__A ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def __UpperCAmelCase ( self : List[Any] ) -> numpy.ndarray:
_lowercase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowercase = det % len(self.key_string )
_lowercase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_lowercase = i
break
_lowercase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__A ) )
def __UpperCAmelCase ( self : Tuple ,__A : str ) -> str:
_lowercase = self.make_decrypt_key()
_lowercase = self.process_text(text.upper() )
_lowercase = ''
for i in range(0 ,len(__A ) - self.break_key + 1 ,self.break_key ):
_lowercase = text[i : i + self.break_key]
_lowercase = [self.replace_letters(__A ) for char in batch]
_lowercase = numpy.array([vec] ).T
_lowercase = self.modulus(decrypt_key.dot(__A ) ).T.tolist()[0]
_lowercase = ''.join(
self.replace_digits(__A ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_lowercase = int(input('Enter the order of the encryption key: ' ) )
_lowercase = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(snake_case__ ):
_lowercase = [int(snake_case__ ) for x in input().split()]
hill_matrix.append(snake_case__ )
_lowercase = HillCipher(numpy.array(snake_case__ ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
_lowercase = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
_lowercase = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(snake_case__ ) )
elif option == "2":
_lowercase = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
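# --- Hedged illustration (added): the brute-force modular inverse used in
# make_decrypt_key above, which searches for i with (det * i) % m == 1.
# `modular_inverse` is a hypothetical standalone helper for the 36-character
# alphabet used by this cipher:
def modular_inverse(det: int, modulus: int) -> int:
    for i in range(1, modulus):
        if (det * i) % modulus == 1:
            return i
    raise ValueError(f"{det} is not invertible modulo {modulus}")

assert modular_inverse(5, 36) == 29  # 5 * 29 == 145 == 4 * 36 + 1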
| 535
| 0
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 9
SCREAMING_SNAKE_CASE_ : List[str] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE_ : Dict = kruskal(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCamelCase_ ) == sorted(lowerCamelCase_ )
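# --- Hedged note (added): the sorted(...) == sorted(...) comparison above
# makes the check order-insensitive, since Kruskal's algorithm may emit
# equally-weighted MST edges in any order. Minimal illustration:
assert sorted([[2, 8, 2], [7, 6, 1]]) == sorted([[7, 6, 1], [2, 8, 2]])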
| 105
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __A( unittest.TestCase ):
snake_case_ = MODEL_FOR_CAUSAL_LM_MAPPING
snake_case_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__a = text_generator('''This is a test''' , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__a = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_snake_case , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__a = text_generator('''This is a test''' , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case )
self.assertEqual(
_snake_case , [
{'''generated_token_ids''': ANY(_snake_case )},
{'''generated_token_ids''': ANY(_snake_case )},
] , )
__a = text_generator.model.config.eos_token_id
__a = '''<pad>'''
__a = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , )
self.assertEqual(
_snake_case , [
[
{'''generated_token_ids''': ANY(_snake_case )},
{'''generated_token_ids''': ANY(_snake_case )},
],
[
{'''generated_token_ids''': ANY(_snake_case )},
{'''generated_token_ids''': ANY(_snake_case )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__a = text_generator('''This is a test''' , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__a = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = TextGenerationPipeline(model=_snake_case , tokenizer=_snake_case )
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = '''Hello I believe in'''
__a = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__a = text_generator(_snake_case )
self.assertEqual(
_snake_case , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__a = text_generator(_snake_case , stop_sequence=''' fe''' )
self.assertEqual(_snake_case , [{'''generated_text''': '''Hello I believe in fe'''}] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
__a = text_generator.model
__a = text_generator.tokenizer
__a = text_generator('''This is a test''' )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__a = text_generator('''This is a test''' , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__a = pipeline(task='''text-generation''' , model=_snake_case , tokenizer=_snake_case , return_full_text=_snake_case )
__a = text_generator('''This is a test''' )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__a = text_generator('''This is a test''' , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__a = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__a = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
[{'''generated_text''': ANY(_snake_case )}, {'''generated_text''': ANY(_snake_case )}],
] , )
with self.assertRaises(_snake_case ):
__a = text_generator('''test''' , return_full_text=_snake_case , return_text=_snake_case )
with self.assertRaises(_snake_case ):
__a = text_generator('''test''' , return_full_text=_snake_case , return_tensors=_snake_case )
with self.assertRaises(_snake_case ):
__a = text_generator('''test''' , return_text=_snake_case , return_tensors=_snake_case )
        # Empty prompt is slightly special:
        # it requires the BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__a = text_generator('''''' )
self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__a = text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__a = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
__a = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_snake_case ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
__a = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__a = pipe('''This is a test''' )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__a = pipe('''This is a test''' )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__a = pipe('''This is a test''' )
self.assertEqual(
_snake_case , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
import torch
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
import torch
__a = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_snake_case , top_p=0.5 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = '''Hello world'''
__a = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__a = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__a = logging.get_logger('''transformers.generation.utils''' )
        __a = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_snake_case ) as cl:
__a = text_generator(_snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(_snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_snake_case ) as cl:
__a = text_generator(_snake_case , max_new_tokens=1 )
self.assertNotIn(_snake_case , cl.out )
with CaptureLogger(_snake_case ) as cl:
__a = text_generator(_snake_case , max_length=10 )
self.assertNotIn(_snake_case , cl.out )
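# --- Hedged usage sketch (added): the max_length / max_new_tokens conflict
# exercised above. Both flags bound the generation length, so setting both
# logs the "Both `max_new_tokens`..." warning and, to my understanding,
# max_new_tokens takes precedence. Model name reused from the tests purely
# for illustration:
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   generator("Hello world", max_length=10, max_new_tokens=1)   # warns
#   generator("Hello world", max_new_tokens=1)                  # no warning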
| 219
| 0
|
def A__ ( lowercase: int ) -> None:
A : str =generate_pascal_triangle(lowercase )
for row_idx in range(lowercase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx], end=' ' )
else:
print(triangle[row_idx][col_idx], end='' )
print()
def A__ ( lowercase: int ) -> list[list[int]]:
if not isinstance(lowercase, lowercase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
A : list[list[int]] =[]
for current_row_idx in range(lowercase ):
A : List[str] =populate_current_row(lowercase, lowercase )
triangle.append(lowercase )
return triangle
def A__ ( lowercase: list[list[int]], lowercase: int ) -> list[int]:
A : Tuple =[-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
    A[0], A[-1] = 1, 1
for current_col_idx in range(1, lowercase ):
calculate_current_element(
lowercase, lowercase, lowercase, lowercase )
return current_row
def A__ ( lowercase: list[list[int]], lowercase: list[int], lowercase: int, lowercase: int, ) -> None:
A : Optional[int] =triangle[current_row_idx - 1][current_col_idx - 1]
A : Optional[Any] =triangle[current_row_idx - 1][current_col_idx]
A : int =above_to_left_elt + above_to_right_elt
def A__ ( lowercase: int ) -> list[list[int]]:
if not isinstance(lowercase, lowercase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
A : list[list[int]] =[[1]]
for row_index in range(1, lowercase ):
A : Dict =[0] + result[-1] + [0]
A : Union[str, Any] =row_index + 1
# Calculate the number of distinct elements in a row
A : Any =sum(divmod(lowercase, 2 ) )
A : Any =[
temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1 )
]
A : Dict =row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A : Any =row_first_half + row_second_half
result.append(lowercase )
return result
def A__ ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase: Callable, lowercase: int ) -> None:
A : int =F'{func.__name__}({value})'
A : Optional[int] =timeit(F'__main__.{call}', setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowercase, lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
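# --- Hedged worked example (added): the optimized row construction above.
# Padding the previous row with zeros and summing adjacent pairs yields the
# next row (before the half-row mirroring trick):
_prev = [1, 2, 1]
_padded = [0] + _prev + [0]
assert [_padded[i - 1] + _padded[i] for i in range(1, len(_padded))] == [1, 3, 3, 1]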
| 709
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def A__ ( ) -> List[Any]:
A : Any =_ask_options(
'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
A : Tuple =get_sagemaker_input()
else:
A : str =get_cluster_input()
return config
def A__ ( lowercase: int=None ) -> str:
if subparsers is not None:
A : List[str] =subparsers.add_parser('config', description=lowercase )
else:
A : Union[str, Any] =argparse.ArgumentParser('Accelerate config command', description=lowercase )
parser.add_argument(
'--config_file', default=lowercase, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
), )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def A__ ( lowercase: Tuple ) -> List[Any]:
A : Union[str, Any] =get_user_input()
if args.config_file is not None:
A : Optional[Any] =args.config_file
else:
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
A : Union[str, Any] =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase )
else:
config.to_yaml_file(lowercase )
print(F'accelerate configuration saved at {config_file}' )
def A__ ( ) -> Optional[int]:
A : Any =config_command_parser()
A : int =parser.parse_args()
config_command(lowercase )
if __name__ == "__main__":
main()
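# --- Hedged usage note (added): this module backs `accelerate config`.
# A typical flow might look like the following; the default cache path is an
# assumption that can vary with HF_HOME / XDG_CACHE_HOME:
#   $ accelerate config        # answer the interactive prompts
#   $ cat ~/.cache/huggingface/accelerate/default_config.yaml
#   $ accelerate launch train.py   # later runs pick up the saved config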
| 661
| 0
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase_ = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
lowercase_ = json.load(f)
@require_torch
class A__ ( unittest.TestCase ):
def lowercase ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
__magic_name__ : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowerCamelCase ).to(lowerCamelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
__magic_name__ : Optional[int] = F'''facebook/wmt19-{pair}'''
__magic_name__ : Any = self.get_tokenizer(lowerCamelCase )
__magic_name__ : List[str] = self.get_model(lowerCamelCase )
__magic_name__ : Optional[int] = bleu_data[pair]['''src''']
__magic_name__ : Optional[Any] = bleu_data[pair]['''tgt''']
__magic_name__ : Tuple = tokenizer(lowerCamelCase , return_tensors='''pt''' , truncation=lowerCamelCase , padding='''longest''' ).to(lowerCamelCase )
__magic_name__ : int = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__magic_name__ : Optional[int] = tokenizer.batch_decode(
lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
__magic_name__ : Dict = calculate_bleu(lowerCamelCase , lowerCamelCase )
print(lowerCamelCase )
self.assertGreaterEqual(scores['''bleu'''] , lowerCamelCase )
| 154
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = 0.0 , lowerCamelCase = 50 , lowerCamelCase = "pil" , lowerCamelCase = True , **lowerCamelCase , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
__magic_name__ : int = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase , )
__magic_name__ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__magic_name__ : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCamelCase )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
__magic_name__ : Tuple = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__magic_name__ : Union[str, Any] = {}
if accepts_eta:
__magic_name__ : Union[str, Any] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
__magic_name__ : Dict = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__magic_name__ : List[Any] = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : List[str] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VAE
__magic_name__ : int = self.vqvae.decode(lowerCamelCase ).sample
__magic_name__ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : Dict = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
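# --- Hedged usage sketch (added): calling the unconditional latent-diffusion
# pipeline above end to end. The checkpoint id is an assumption used only
# for illustration, and `pipeline_class` stands in for the class defined above:
#   pipe = pipeline_class.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]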
| 154
| 1
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, "sqlalchemy.sql.Selectable"] , SCREAMING_SNAKE_CASE__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : Dict , ) -> List[Any]:
super().__init__(features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = Sql(
cache_dir=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , sql=SCREAMING_SNAKE_CASE__ , con=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : Tuple = None
a_ : str = None
a_ : Optional[Any] = None
a_ : Dict = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , )
# Build dataset for splits
a_ : Optional[Any] = self.builder.as_dataset(
split='train' , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Dataset , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Union[str, Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
a_ : List[str] = dataset
a_ : Optional[int] = name
a_ : int = con
a_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a_ : Any = num_proc
a_ : Union[str, Any] = to_sql_kwargs
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Any = self.to_sql_kwargs.pop('sql' , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = self.to_sql_kwargs.pop('con' , SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.to_sql_kwargs.pop('index' , SCREAMING_SNAKE_CASE__ )
a_ : int = self._write(index=SCREAMING_SNAKE_CASE__ , **self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
a_ , a_ , a_ : Optional[Any] = args
a_ : Dict = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
a_ : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE__ , offset + self.batch_size ) , indices=self.dataset._indices , )
a_ : List[Any] = batch.to_pandas()
a_ : Optional[Any] = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return num_rows or len(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> int:
a_ : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
a_ , a_ : str = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
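# --- Hedged usage sketch (added): the writer above appears to back
# `Dataset.to_sql`. A minimal end-to-end example with sqlite3 follows; the
# table and file names are assumptions for illustration:
#   import sqlite3
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   con = sqlite3.connect("example.db")
#   ds.to_sql("my_table", con)   # returns the number of rows written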
| 443
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowercase__ ) , '''Tatoeba directory does not exist.''' )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : Optional[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
self.resolver.convert_models(['heb-eng'] )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
a_ , a_ : Dict = self.resolver.write_model_card('opus-mt-he-en' , dry_run=SCREAMING_SNAKE_CASE__ )
assert mmeta["long_pair"] == "heb-eng"
| 443
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
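# --- Hedged sketch (added): the `_LazyModule` pattern above defers the heavy
# framework imports until an attribute is first accessed, keeping the
# top-level import cheap. `_TinyLazyModule` is a simplified standalone
# illustration of the idea, not the real implementation:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # attribute name -> module path
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

_lazy = _TinyLazyModule("demo", {"sqrt": "math"})
assert _lazy.sqrt(9) == 3.0  # "math" is only imported on this first access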
| 229
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __lowercase :
'''simple docstring'''
_A : Dict = LEDConfig
_A : Tuple = {}
_A : Union[str, Any] = '''gelu'''
def __init__( self : List[Any] , _a : Tuple , _a : List[Any]=13 , _a : Union[str, Any]=7 , _a : Any=True , _a : List[str]=False , _a : str=99 , _a : Union[str, Any]=32 , _a : List[Any]=2 , _a : int=4 , _a : List[Any]=37 , _a : Optional[Any]=0.1 , _a : Any=0.1 , _a : int=20 , _a : Optional[int]=2 , _a : List[Any]=1 , _a : List[Any]=0 , _a : str=4 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = bos_token_id
UpperCamelCase__ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCamelCase__ = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCamelCase__ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A_ ( self : int ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCamelCase__ = prepare_led_inputs_dict(_a , _a , _a )
UpperCamelCase__ = tf.concat(
[tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
UpperCamelCase__ = global_attention_mask
return config, inputs_dict
def A_ ( self : str , _a : Any , _a : List[str] ):
UpperCamelCase__ = TFLEDModel(config=_a ).get_decoder()
UpperCamelCase__ = inputs_dict['''input_ids''']
UpperCamelCase__ = input_ids[:1, :]
UpperCamelCase__ = inputs_dict['''attention_mask'''][:1, :]
UpperCamelCase__ = 1
# first forward pass
UpperCamelCase__ = model(_a , attention_mask=_a , use_cache=_a )
UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCamelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase__ = model(_a , attention_mask=_a )[0]
UpperCamelCase__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1E-3 )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : int=None, UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : List[str]=None, ):
'''simple docstring'''
if attention_mask is None:
UpperCamelCase__ = tf.cast(tf.math.not_equal(UpperCamelCase__, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
UpperCamelCase__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
UpperCamelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __lowercase ( A, A, unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_A : Union[str, Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_A : List[str] = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : Optional[Any] = True
_A : int = False
_A : Optional[int] = False
_A : int = False
def A_ ( self : Optional[int] ):
UpperCamelCase__ = TFLEDModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=_a )
def A_ ( self : int ):
self.config_tester.run_common_tests()
def A_ ( self : Any ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def A_ ( self : List[Any] ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCamelCase__ = 2
UpperCamelCase__ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCamelCase__ = True
UpperCamelCase__ = self.model_tester.seq_length
UpperCamelCase__ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a : int ):
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_a : Any ):
UpperCamelCase__ = [t.numpy() for t in outputs.encoder_attentions]
UpperCamelCase__ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = model_class(_a )
UpperCamelCase__ = model(self._prepare_for_class(_a , _a ) )
UpperCamelCase__ = len(_a )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(_a )
UpperCamelCase__ = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(_a )
UpperCamelCase__ = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(_a )
UpperCamelCase__ = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
self.assertEqual(model.config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def A_ ( self : List[str] ):
pass
def A_ ( self : Dict ):
        # TODO: Head-masking not yet implemented
pass
def lowerCamelCase_ ( UpperCamelCase__ : Any ):
'''simple docstring'''
return tf.constant(UpperCamelCase__, dtype=tf.intaa )
lowercase = 1E-4
@slow
@require_tf
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Any ):
UpperCamelCase__ = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCamelCase__ = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = prepare_led_inputs_dict(model.config , _a , _a )
UpperCamelCase__ = model(**_a )[0]
UpperCamelCase__ = (1, 1_024, 768)
self.assertEqual(output.shape , _a )
# change to expected output here
UpperCamelCase__ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-3 )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCamelCase__ = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = prepare_led_inputs_dict(model.config , _a , _a )
UpperCamelCase__ = model(**_a )[0]
UpperCamelCase__ = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , _a )
# change to expected output here
UpperCamelCase__ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-3 , rtol=1E-3 )
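# --- Hedged worked example (added): the encoder_seq_length rounding used in
# the tester above; inputs are padded up to a multiple of the attention
# window for Longformer-style local attention:
_seq_length, _attention_window = 7, 4
_encoder_seq_length = _seq_length + (_attention_window - _seq_length % _attention_window) % _attention_window
assert _encoder_seq_length == 8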
| 240
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1E-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Union[str, Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def test_training(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, """expected_num_hidden_layers""", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _UpperCAmelCase ( self ):
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1E-5), msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            F" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            F" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})
@require_torch
class MaskFormerSwinBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["""pixel_values"""].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 705
|
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    """simple docstring"""

    # warning at import time
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''', FutureWarning, )
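# Usage note (an illustrative aside, not part of the original file): importing from the
# legacy path still works but emits the FutureWarning above at import time.
#
#     from transformers.generation_tf_utils import TFGenerationMixin  # warns
#     from transformers import TFGenerationMixin                      # preferred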
| 543
| 0
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''text''': '''string'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=("train",) ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
for split in splits:
snake_case : Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'''train''': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'''text''': '''string'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'''train''': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = '''train'''
        path = {'''train''': text_path, '''test''': text_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 349
|
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
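# Quick worked examples for the helpers above (an illustrative aside, not part of the
# original file):
#
#     softmax(np.array([[2.0, 1.0, 0.1]]))  # -> [[0.659, 0.242, 0.099]], rows sum to 1
#     sigmoid(np.array([0.0]))              # -> [0.5]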
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'


@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `"default"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `"sigmoid"`: Applies the sigmoid function on the output.\n            - `"softmax"`: Applies the softmax function on the output.\n            - `"none"`: Does not apply any function on the output.\n    ' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        '''simple docstring'''
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare "no top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, '''return_all_scores''') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params['''top_k'''] = top_k
            postprocess_params['''_legacy'''] = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''', UserWarning, )
            if return_all_scores:
                postprocess_params['''top_k'''] = None
            else:
                postprocess_params['''top_k'''] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params['''function_to_apply'''] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = '''top_k''' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        '''simple docstring'''
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, '''function_to_apply''') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs['''logits'''][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {'''label''': self.model.config.id2label[i], '''score''': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
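# Usage sketch (an illustrative aside, not part of the original file): the class above is
# normally constructed through `transformers.pipeline`; the checkpoint name below is just
# an example.
#
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This movie was great!")              # [{'label': 'POSITIVE', 'score': ...}]
#     classifier("This movie was great!", top_k=None)  # scores for every label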
| 349
| 1
|
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    '''simple docstring'''
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    '''simple docstring'''
    value = os.environ.get(key, str(default))
    return value
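# Example usage (an illustrative aside, not part of the original file):
#
#     parse_flag_from_env("MY_DEBUG")                   # True iff MY_DEBUG is set to a truthy value
#     get_int_from_env(["MY_SEED", "GLOBAL_SEED"], 42)  # 42 when neither variable is set
#     parse_choice_from_env("MY_MODE", default="no")    # "no" unless MY_MODE is set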
| 196
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0) -> None:
        '''simple docstring'''
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string(self, content: str, key: int = 0) -> str:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file(self, file: str, key: int = 0) -> bool:
        '''simple docstring'''
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("""encrypt.out""", """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int = 0) -> bool:
        '''simple docstring'''
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("""decrypt.out""", """w+""") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 196
| 1
|
def mean_absolute_deviation(nums):
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
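    # A quick worked example (an illustrative addition, not in the original file): the
    # average of [1, 2, 3, 4] is 2.5, the absolute deviations are [1.5, 0.5, 0.5, 1.5],
    # so the mean absolute deviation is 1.0.
    assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0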
| 54
|
def solution(pence: int = 2_0_0) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
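    # A small worked example (an illustrative addition, not in the original file): there
    # are 4 ways to make 5 pence (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
    assert solution(5) == 4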
| 54
| 1
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        '''simple docstring'''

        def __init__(self, sentence_delimiter=" "):
            """simple docstring"""
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            """simple docstring"""
            return list(s)

        def process_list(self, inp):
            """simple docstring"""
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n  year = {2004},\n  month = {01},\n  pages = {},\n  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    '''simple docstring'''

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        """simple docstring"""
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
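# Usage sketch (an illustrative aside mirroring the docstring above, not part of the
# original file):
#
#     cer = datasets.load_metric("cer")
#     cer.compute(predictions=["this is the prediction"], references=["this is the reference"])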
| 302
|
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = {'a', 'b', 'c', 'd', 'e'}
lowerCamelCase_ : str = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
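    # An extra check (an illustrative addition, not in the original file): the sets share
    # {'c', 'd', 'e'}, i.e. 3 of 8 distinct elements, so the similarity is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375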
| 302
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])

    parser = ArgumentParser()
    parser.add_argument('--streaming', type=bool)
    parser.add_argument('--local_rank', type=int)
    parser.add_argument('--num_workers', type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'shards': [F"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
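# Launch sketch (an assumption about intended usage, not stated in this file): RANK and
# WORLD_SIZE must be set by a distributed launcher, e.g.
#
#     torchrun --nproc_per_node=2 this_script.py --num_workers 2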
| 417
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'RUCAIBox/mvp': 1_024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.' )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.' )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 417
| 1
|
lowercase_ = """Input must be a string of 8 numbers plus letter"""
lowercase_ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = f'''Expected string as input, found {type(SCREAMING_SNAKE_CASE_ ).__name__}'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
lowercase__ = spanish_id.replace("-" , "" ).upper()
if len(SCREAMING_SNAKE_CASE_ ) != 9:
raise ValueError(SCREAMING_SNAKE_CASE_ )
try:
lowercase__ = int(spanish_id_clean[0:8] )
lowercase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(SCREAMING_SNAKE_CASE_ ) from ex
if letter.isdigit():
raise ValueError(SCREAMING_SNAKE_CASE_ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
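    # A known-good example (an illustrative addition, not in the original file):
    # 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so this DNI validates.
    assert is_spain_national_id("12345678Z")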
| 37
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 37
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 3_2

    @property
    def time_input_dim(self):
        """simple docstring"""
        return 3_2

    @property
    def block_out_channels_0(self):
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 1_0_0
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        """simple docstring"""
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_00_85, beta_end=0.0_12, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type='epsilon', thresholding=False, )

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create init_image
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
        _lowerCAmelCase = np.ones((6_4, 6_4) , dtype=np.float32 )
_lowerCAmelCase = 0
if str(lowerCamelCase__ ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCAmelCase = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'cpu'
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase__ )
_lowerCAmelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
_lowerCAmelCase = output.images
_lowerCAmelCase = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
_lowerCAmelCase = image[0, -3:, -3:, -1]
_lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCAmelCase = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        _lowerCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
_lowerCAmelCase = 0
_lowerCAmelCase = 'a hat'
        _lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(lowerCamelCase__ )
        _lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
_lowerCAmelCase = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase = pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase = pipeline(
image=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
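# --- Illustrative usage sketch (commented out; not part of the test suite) ---
# A minimal sketch of the prior + decoder inpaint flow the slow test above exercises.
# The model ids come from that test; fp16, the CUDA device, the step counts and the
# exact mask slice are assumptions and may need adjusting for your diffusers version,
# which also determines whether 0 or 1 marks the region to repaint.
#
# import numpy as np
# import torch
# from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline
# from diffusers.utils import load_image
#
# prior = KandinskyV22PriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
# ).to("cuda")
# decoder = KandinskyV22InpaintPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
# ).to("cuda")
#
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
# )
# mask = np.ones((768, 768), dtype=np.float32)
# mask[:250, 250:-250] = 0  # carve out a band for the hat
# image_embeds, negative_image_embeds = prior(
#     "a hat", num_inference_steps=25, negative_prompt=""
# ).to_tuple()
# image = decoder(
#     image=init_image,
#     mask_image=mask,
#     image_embeds=image_embeds,
#     negative_image_embeds=negative_image_embeds,
#     height=768,
#     width=768,
#     num_inference_steps=50,
# ).images[0]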
| 589
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__A = logging.getLogger(__name__)
def _A ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
lowercase__ = []
# custom device map
if isinstance(lowercase__ , lowercase__ ) and len(device_map.keys() ) > 1:
lowercase__ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowercase__ = get_keys_to_not_convert(lowercase__ )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(lowercase__ )
lowercase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        lowercase__ = []
    lowercase__ = bnb_quantization_config.keep_in_fp32_modules
modules_to_not_convert.extend(lowercase__ )
# compatibility with peft
    lowercase__ = load_in_4bit  # model.is_loaded_in_4bit
    lowercase__ = load_in_8bit  # model.is_loaded_in_8bit
lowercase__ = get_parameter_device(lowercase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
lowercase__ = replace_with_bnb_layers(lowercase__ , lowercase__ , modules_to_not_convert=lowercase__ )
# convert param to the right dtype
lowercase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
        if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
            param.to(torch.float32 )
            if param.dtype != torch.float32:
                lowercase__ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                lowercase__ = getattr(lowercase__ , lowercase__ , lowercase__ )
                if param is not None:
                    param.to(torch.float32 )
        elif torch.is_floating_point(lowercase__ ):
            param.to(lowercase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
            """We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowercase__ = replace_with_bnb_layers(
lowercase__ , lowercase__ , modules_to_not_convert=lowercase__ )
lowercase__ = get_quantized_model_device_map(
lowercase__ , lowercase__ , lowercase__ , max_memory=lowercase__ , no_split_module_classes=lowercase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase__ = True
lowercase__ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
        lowercase__ , lowercase__ , lowercase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase__ , offload_state_dict=lowercase__ , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(lowercase__ , device_map=lowercase__ , offload_dir=lowercase__ )
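# --- Illustrative usage sketch (commented out) ---
# This module mirrors accelerate's `load_and_quantize_model`. A minimal 8-bit flow,
# assuming a transformers model; the repo id and checkpoint folder are placeholders.
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# from transformers import AutoConfig, AutoModelForCausalLM
#
# config = AutoConfig.from_pretrained("my-org/my-model")  # hypothetical repo id
# with init_empty_weights():
#     empty_model = AutoModelForCausalLM.from_config(config)
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# quantized_model = load_and_quantize_model(
#     empty_model,
#     bnb_quantization_config=bnb_config,
#     weights_location="path/to/checkpoint_folder",  # folder containing the weights
#     device_map="auto",
# )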
def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None ):
if device_map is None:
if torch.cuda.is_available():
lowercase__ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(lowercase__ , lowercase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
lowercase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
            name: torch.float32
            for name, _ in model.named_parameters()
            if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
} )
lowercase__ = {}
lowercase__ = special_dtypes
lowercase__ = no_split_module_classes
lowercase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowercase__ = get_balanced_memory(
lowercase__ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowercase__ , **lowercase__ , )
lowercase__ = max_memory
lowercase__ = infer_auto_device_map(lowercase__ , **lowercase__ )
if isinstance(lowercase__ , lowercase__ ):
# check if don't have any quantized module on the cpu
        lowercase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
lowercase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
                    logger.info(
                        """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit.""" )
del device_map_without_some_modules
return device_map
def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
if modules_to_not_convert is None:
lowercase__ = []
lowercase__ , lowercase__ = _replace_with_bnb_layers(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , ):
lowercase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowercase__ = []
current_key_name.append(lowercase__ )
if isinstance(lowercase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowercase__ = """.""".join(lowercase__ )
lowercase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowercase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    lowercase__ = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=lowercase__ , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    lowercase__ = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
lowercase__ = module.weight.data
if module.bias is not None:
lowercase__ = module.bias.data
bnb_module.requires_grad_(lowercase__ )
setattr(lowercase__ , lowercase__ , lowercase__ )
lowercase__ = True
if len(list(module.children() ) ) > 0:
lowercase__ , lowercase__ = _replace_with_bnb_layers(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowercase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _A ( lowercase__ ):
# Create a copy of the model
with init_empty_weights():
lowercase__ = deepcopy(lowercase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowercase__ = find_tied_parameters(lowercase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase__ , lowercase__ ):
lowercase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase__ = sum(lowercase__ , [] )
lowercase__ = len(lowercase__ ) > 0
# Check if it is a base model
lowercase__ = False
if hasattr(lowercase__ , """base_model_prefix""" ):
lowercase__ = not hasattr(lowercase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase__ = list(model.named_children() )
lowercase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase__ = set(lowercase__ ) - set(lowercase__ )
lowercase__ = list(set(lowercase__ ) ) + list(lowercase__ )
# remove ".weight" from the keys
lowercase__ = [""".weight""", """.bias"""]
lowercase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase__ = name.replace(lowercase__ , """""" )
filtered_module_names.append(lowercase__ )
return filtered_module_names
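# Worked example (commented out; model name hypothetical): for a GPT-style causal LM
# whose input embeddings are tied to the output head, the helper above keeps the last
# module plus its tied weights out of quantization, e.g. it returns ["lm_head"], so
# that head stays in full precision for numerical stability.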
def _A ( lowercase__ ):
for m in model.modules():
        if isinstance(lowercase__ , bnb.nn.Linear4bit ):
return True
return False
def _A ( lowercase__ ):
return next(parameter.parameters() ).device
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowercase__ , lowercase__ , 0 , dtype=lowercase__ , value=lowercase__ )
lowercase__ = param_name
lowercase__ = model
if "." in tensor_name:
lowercase__ = tensor_name.split(""".""" )
for split in splits[:-1]:
lowercase__ = getattr(lowercase__ , lowercase__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowercase__ = new_module
lowercase__ = splits[-1]
# offload weights
lowercase__ = False
offload_weight(module._parameters[tensor_name] , lowercase__ , lowercase__ , index=lowercase__ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowercase__ , index=lowercase__ , )
else:
offload_weight(lowercase__ , lowercase__ , lowercase__ , index=lowercase__ )
offload_weight(lowercase__ , param_name.replace("""weight""" , """SCB""" ) , lowercase__ , index=lowercase__ )
set_module_tensor_to_device(lowercase__ , lowercase__ , """meta""" , dtype=lowercase__ , value=torch.empty(*param.size() ) )
| 325
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class _UpperCAmelCase ( lowercase ):
def __init__( self : List[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : str):
super().__init__(*UpperCAmelCase , **UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :int = {}
def _snake_case ( self : str , UpperCAmelCase : Optional[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :Tuple = super().add_tokens(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase)
if num_added_tokens == 0:
raise ValueError(
F"The tokenizer already contains the token {placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer.")
def _snake_case ( self : Any , UpperCAmelCase : List[str] , *UpperCAmelCase : List[Any] , UpperCAmelCase : Dict=1 , **UpperCAmelCase : Dict):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase)
output.append(UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE_ :List[Any] = []
for i in range(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = placeholder_token + F"_{i}"
self.try_adding_tokens(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase)
output.append(UpperCAmelCase)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
                raise ValueError(
                    F"The tokenizer already has placeholder token {token} that can get confused with"
                    F" {placeholder_token}; keep placeholder tokens independent.")
SCREAMING_SNAKE_CASE_ :List[str] = output
def _snake_case ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[Any]=1.0):
if isinstance(UpperCAmelCase , UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :str = []
for i in range(len(UpperCAmelCase)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE_ :Any = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokens[: 1 + int(len(UpperCAmelCase) * prop_tokens_to_load)]
if vector_shuffle:
SCREAMING_SNAKE_CASE_ :List[str] = copy.copy(UpperCAmelCase)
random.shuffle(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[Any] = text.replace(UpperCAmelCase , " ".join(UpperCAmelCase))
return text
def __call__( self : Dict , UpperCAmelCase : List[Any] , *UpperCAmelCase : int , UpperCAmelCase : Dict=False , UpperCAmelCase : Optional[int]=1.0 , **UpperCAmelCase : str):
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase , vector_shuffle=UpperCAmelCase , prop_tokens_to_load=UpperCAmelCase) , *UpperCAmelCase , **UpperCAmelCase , )
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : str , *UpperCAmelCase : int , UpperCAmelCase : int=False , UpperCAmelCase : Union[str, Any]=1.0 , **UpperCAmelCase : Tuple):
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase , vector_shuffle=UpperCAmelCase , prop_tokens_to_load=UpperCAmelCase) , *UpperCAmelCase , **UpperCAmelCase , )
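# --- Illustrative usage sketch (commented out; the class and method names
# MultiTokenCLIPTokenizer / add_placeholder_tokens are assumed from the upstream
# textual-inversion example this snippet mirrors) ---
#
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# # "<cat-toy>" is registered as <cat-toy>_0 ... <cat-toy>_3 and expanded at call time:
# encoding = tokenizer("a photo of <cat-toy>", vector_shuffle=False)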
| 717
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Collection[float] | None = None):
if components is None:
SCREAMING_SNAKE_CASE_ :List[str] = []
SCREAMING_SNAKE_CASE_ :Optional[int] = list(UpperCAmelCase)
def __len__( self : Optional[Any]):
return len(self.__components)
def __str__( self : List[Any]):
return "(" + ",".join(map(UpperCAmelCase , self.__components)) + ")"
def __add__( self : Optional[int] , UpperCAmelCase : Vector):
SCREAMING_SNAKE_CASE_ :List[str] = len(self)
if size == len(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [self.__components[i] + other.component(UpperCAmelCase) for i in range(UpperCAmelCase)]
return Vector(UpperCAmelCase)
else:
raise Exception("must have the same size")
def __sub__( self : List[str] , UpperCAmelCase : Vector):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = len(self)
if size == len(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Dict = [self.__components[i] - other.component(UpperCAmelCase) for i in range(UpperCAmelCase)]
return Vector(UpperCAmelCase)
else: # error case
raise Exception("must have the same size")
@overload
def __mul__( self : List[Any] , UpperCAmelCase : float):
...
@overload
def __mul__( self : int , UpperCAmelCase : Vector):
...
def __mul__( self : int , UpperCAmelCase : float | Vector):
if isinstance(UpperCAmelCase , (float, int)):
SCREAMING_SNAKE_CASE_ :Tuple = [c * other for c in self.__components]
return Vector(UpperCAmelCase)
elif isinstance(UpperCAmelCase , UpperCAmelCase) and len(self) == len(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Optional[int] = len(self)
SCREAMING_SNAKE_CASE_ :str = [self.__components[i] * other.component(UpperCAmelCase) for i in range(UpperCAmelCase)]
return sum(UpperCAmelCase)
else: # error case
raise Exception("invalid operand!")
def _snake_case ( self : Any):
return Vector(self.__components)
def _snake_case ( self : str , UpperCAmelCase : int):
if isinstance(UpperCAmelCase , UpperCAmelCase) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("index out of range")
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : float):
assert -len(self.__components) <= pos < len(self.__components)
SCREAMING_SNAKE_CASE_ :List[str] = value
def _snake_case ( self : str):
if len(self.__components) == 0:
raise Exception("Vector is empty")
SCREAMING_SNAKE_CASE_ :Optional[int] = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCAmelCase))
def _snake_case ( self : str , UpperCAmelCase : Vector , UpperCAmelCase : bool = False):
SCREAMING_SNAKE_CASE_ :Optional[Any] = self * other
SCREAMING_SNAKE_CASE_ :Dict = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def lowercase ( a ):
'''simple docstring'''
assert isinstance(a , a )
return Vector([0] * dimension )
def lowercase ( a , a ):
'''simple docstring'''
assert isinstance(a , a ) and (isinstance(a , a ))
SCREAMING_SNAKE_CASE_ :str = [0] * dimension
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 1
return Vector(a )
def lowercase ( a , a , a ):
'''simple docstring'''
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def lowercase ( a , a , a ):
'''simple docstring'''
random.seed(a )
SCREAMING_SNAKE_CASE_ :int = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _UpperCAmelCase :
def __init__( self : Optional[int] , UpperCAmelCase : list[list[float]] , UpperCAmelCase : int , UpperCAmelCase : int):
SCREAMING_SNAKE_CASE_ :str = matrix
SCREAMING_SNAKE_CASE_ :List[Any] = w
SCREAMING_SNAKE_CASE_ :List[Any] = h
def __str__( self : List[str]):
SCREAMING_SNAKE_CASE_ :Optional[Any] = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self : Union[str, Any] , UpperCAmelCase : Matrix):
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE_ :Any = []
for i in range(self.__height):
SCREAMING_SNAKE_CASE_ :str = [
self.__matrix[i][j] + other.component(UpperCAmelCase , UpperCAmelCase)
for j in range(self.__width)
]
matrix.append(UpperCAmelCase)
return Matrix(UpperCAmelCase , self.__width , self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__( self : Union[str, Any] , UpperCAmelCase : Matrix):
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE_ :List[Any] = []
for i in range(self.__height):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [
self.__matrix[i][j] - other.component(UpperCAmelCase , UpperCAmelCase)
for j in range(self.__width)
]
matrix.append(UpperCAmelCase)
return Matrix(UpperCAmelCase , self.__width , self.__height)
else:
raise Exception("matrices must have the same dimension!")
@overload
def __mul__( self : Tuple , UpperCAmelCase : float):
...
@overload
def __mul__( self : Optional[Any] , UpperCAmelCase : Vector):
...
def __mul__( self : List[str] , UpperCAmelCase : float | Vector):
if isinstance(UpperCAmelCase , UpperCAmelCase): # matrix-vector
if len(UpperCAmelCase) == self.__width:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = zero_vector(self.__height)
for i in range(self.__height):
SCREAMING_SNAKE_CASE_ :Optional[Any] = [
self.__matrix[i][j] * other.component(UpperCAmelCase)
for j in range(self.__width)
]
ans.change_component(UpperCAmelCase , sum(UpperCAmelCase))
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!")
elif isinstance(UpperCAmelCase , (int, float)): # matrix-scalar
SCREAMING_SNAKE_CASE_ :Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(UpperCAmelCase , self.__width , self.__height)
return None
def _snake_case ( self : Optional[int]):
return self.__height
def _snake_case ( self : Optional[int]):
return self.__width
def _snake_case ( self : str , UpperCAmelCase : int , UpperCAmelCase : int):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception("component: indices out of bounds")
def _snake_case ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float):
if 0 <= x < self.__height and 0 <= y < self.__width:
SCREAMING_SNAKE_CASE_ :Dict = value
else:
raise Exception("change_component: indices out of bounds")
def _snake_case ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int):
if self.__height != self.__width:
raise Exception("Matrix is not square")
SCREAMING_SNAKE_CASE_ :Dict = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCAmelCase)):
SCREAMING_SNAKE_CASE_ :Dict = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCAmelCase , self.__width - 1 , self.__height - 1).determinant()
def _snake_case ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int):
if self.__height != self.__width:
raise Exception("Matrix is not square")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCAmelCase , UpperCAmelCase)
else:
raise Exception("Indices out of bounds")
def _snake_case ( self : Union[str, Any]):
if self.__height != self.__width:
raise Exception("Matrix is not square")
if self.__height < 1:
raise Exception("Matrix has no element")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
SCREAMING_SNAKE_CASE_ :str = [
self.__matrix[0][y] * self.cofactor(0 , UpperCAmelCase) for y in range(self.__width)
]
return sum(UpperCAmelCase)
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def lowercase ( a , a , a , a ):
'''simple docstring'''
random.seed(a )
SCREAMING_SNAKE_CASE_ :list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
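# --- Worked example (commented out; the conventional Vector/Matrix names are assumed
# for the two classes defined above) ---
# x = Vector([1, 2, 3]); y = Vector([3, 2, 1])
# str(x + y)            # "(4,4,4)"
# x * y                 # dot product: 1*3 + 2*2 + 3*1 = 10
# x.euclidean_length()  # sqrt(1 + 4 + 9) = sqrt(14)
# m = Matrix([[1, 2], [3, 4]], 2, 2)
# m.determinant()       # 1*4 - 2*3 = -2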
| 140
| 0
|
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowercase ( UpperCamelCase : List[str] ):
"""simple docstring"""
A__ : str =[
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(a_ , a_ )
def lowercase ( UpperCamelCase : List[str] ):
"""simple docstring"""
A__ : Any =emb.weight.shape
A__ : Dict =nn.Linear(a_ , a_ , bias=a_ )
A__ : List[str] =emb.weight.data
return lin_layer
def lowercase ( UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A__ : str =torch.load(a_ , map_location="cpu" )
A__ : List[str] =Namespace(**checkpoint["cfg"]["model"] )
A__ : Union[str, Any] =checkpoint['model']
remove_ignore_keys_(a_ )
A__ : Dict =state_dict['decoder.embed_tokens.weight'].shape[0]
A__ : Tuple ={key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
A__ : Optional[Any] =XGLMConfig(
vocab_size=a_ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
A__ : Tuple =XGLMForCausalLM(a_ )
A__ : List[str] =model.load_state_dict(a_ , strict=a_ )
print(a_ )
A__ : List[Any] =make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__A : Tuple = parser.parse_args()
__A : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 656
|
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_lowerCAmelCase :Any = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_lowerCAmelCase :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
_lowerCAmelCase :Optional[int] = [file for file in filepaths if """ """ in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
_lowerCAmelCase :List[str] = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
_lowerCAmelCase :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
_lowerCAmelCase :str = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 251
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCamelCase__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowercase ( nn.Module ):
def __init__( self : int , lowercase__ : Union[str, Any] ):
super().__init__()
        a_ = torchvision.models.resnet152(pretrained=lowercase__ )
a_ = list(model.children() )[:-2]
a_ = nn.Sequential(*lowercase__ )
        a_ = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def __magic_name__ ( self : int , lowercase__ : List[str] ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
a_ = self.pool(self.model(lowercase__ ) )
a_ = torch.flatten(lowercase__ , start_dim=2 )
a_ = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class __lowercase ( a__ ):
def __init__( self : Dict , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Tuple ):
a_ = [json.loads(lowercase__ ) for l in open(lowercase__ )]
a_ = os.path.dirname(lowercase__ )
a_ = tokenizer
a_ = labels
a_ = len(lowercase__ )
a_ = max_seq_length
a_ = transforms
def __len__( self : Dict ):
return len(self.data )
def __getitem__( self : List[Any] , lowercase__ : str ):
a_ = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=lowercase__ ) )
a_ , a_ , a_ = sentence[0], sentence[1:-1], sentence[-1]
a_ = sentence[: self.max_seq_length]
a_ = torch.zeros(self.n_classes )
a_ = 1
a_ = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
a_ = self.transforms(lowercase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __magic_name__ ( self : Union[str, Any] ):
a_ = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
a_ = [len(row['''sentence'''] ) for row in batch]
a_ , a_ = len(_A ), max(_A )
a_ = torch.zeros(_A , _A , dtype=torch.long )
a_ = torch.zeros(_A , _A , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_A , _A ) ):
a_ = input_row['''sentence''']
a_ = 1
a_ = torch.stack([row['''image'''] for row in batch] )
a_ = torch.stack([row['''label'''] for row in batch] )
a_ = torch.stack([row['''image_start_token'''] for row in batch] )
a_ = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def UpperCAmelCase__ ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def UpperCAmelCase__ ( ):
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
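# --- Illustrative wiring sketch (commented out; the upstream names JsonlDataset,
# get_mmimdb_labels, get_image_transforms and collate_fn are assumed, and the paths
# are placeholders) ---
#
# from torch.utils.data import DataLoader
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = JsonlDataset("mmimdb/train.jsonl", tokenizer, get_image_transforms(),
#                        get_mmimdb_labels(), max_seq_length=510)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
# text, mask, img, img_start, img_end, tgt = next(iter(loader))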
| 143
|
from __future__ import annotations
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
a_ = [True] * limit
a_ = False
a_ = False
a_ = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
a_ = i * 2
while index < limit:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 , _A , 2 ):
if is_prime[i]:
primes.append(_A )
return primes
def UpperCAmelCase__ ( _A = 1_000_000 ):
"""simple docstring"""
a_ = prime_sieve(_A )
a_ = 0
a_ = 0
for i in range(len(_A ) ):
for j in range(i + length , len(_A ) ):
a_ = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
a_ = j - i
a_ = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
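# Sanity notes (commented out): a sieve limit of 30 yields the primes
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]; for the Project Euler #50 ceiling of
# 1_000_000 the value printed above is 997651, the sum of 543 consecutive primes
# starting at 7.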
| 143
| 1
|
"""simple docstring"""
from PIL import Image
def __snake_case ( SCREAMING_SNAKE_CASE__ : Image , SCREAMING_SNAKE_CASE__ : float ) -> Image:
'''simple docstring'''
def brightness(SCREAMING_SNAKE_CASE__ : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 1_00)
        bright_img.save("image_data/lena_brightness.png", format="png")
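# Worked example: with level = 100 a mid-gray pixel c = 128 maps to
# 128 + 100 + (128 - 128) = 228, i.e. the function shifts every channel by `level`;
# Image.point() then clips the result to the 0-255 band of an 8-bit image.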
| 289
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
f'{test_file} instead.' )
_UpperCAmelCase : int = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
_UpperCAmelCase : Any = components[:-1] + [test_fn.replace(".py" , "" )]
_UpperCAmelCase : List[str] = ".".join(SCREAMING_SNAKE_CASE__ )
return test_module_path
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_module_path(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = importlib.import_module(SCREAMING_SNAKE_CASE__ )
return test_module
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : str = get_test_module(SCREAMING_SNAKE_CASE__ )
for attr in dir(SCREAMING_SNAKE_CASE__ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Tuple = get_test_module(SCREAMING_SNAKE_CASE__ )
for attr in dir(SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCAmelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , "all_model_classes" , [] )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
test_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = get_test_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Tuple = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = test_class()
if hasattr(SCREAMING_SNAKE_CASE__ , "setUp" ):
test.setUp()
_UpperCAmelCase : int = None
if hasattr(SCREAMING_SNAKE_CASE__ , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCAmelCase : List[str] = test.model_tester.__class__
return model_tester
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = get_test_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = get_test_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = []
for test_class in test_classes:
_UpperCAmelCase : Optional[Any] = get_model_tester_from_test_class(SCREAMING_SNAKE_CASE__ )
if tester_class is not None:
tester_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = get_test_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = {test_class: get_model_tester_from_test_class(SCREAMING_SNAKE_CASE__ ) for test_class in test_classes}
return test_tester_mapping
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = get_model_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = {
model_class: get_test_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for model_class in model_classes
}
return model_test_mapping
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = get_model_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {
model_class: get_tester_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for model_class in model_classes
}
return model_to_tester_mapping
def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return o
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE__ ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return {to_json(SCREAMING_SNAKE_CASE__ ): to_json(SCREAMING_SNAKE_CASE__ ) for k, v in o.items()}
else:
return o
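# --- Illustrative usage (commented out; the path is only an example) ---
# get_module_path("tests/models/bert/test_modeling_bert.py")
#     -> "tests.models.bert.test_modeling_bert"
# get_test_classes("tests/models/bert/test_modeling_bert.py")
#     -> the test classes in that module whose `all_model_classes` is non-empty,
#        sorted by class name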
| 289
| 1
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 3 )-> qiskit.result.counts.Counts:
    if isinstance(snake_case , str ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(snake_case ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
_lowerCamelCase = QuantumRegister(snake_case , 'qr' )
_lowerCamelCase = ClassicalRegister(snake_case , 'cr' )
_lowerCamelCase = QuantumCircuit(snake_case , snake_case )
_lowerCamelCase = number_of_qubits
for i in range(snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , snake_case , snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(snake_case , snake_case )
# simulate with 10000 shots
_lowerCamelCase = Aer.get_backend('qasm_simulator' )
_lowerCamelCase = execute(snake_case , snake_case , shots=10_000 )
return job.result().get_counts(snake_case )
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
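# Worked example (commented out): quantum_fourier_transform(2) runs the QFT on |00>.
# Since QFT|00> is the uniform superposition, the 10_000-shot histogram is spread
# roughly evenly over '00', '01', '10' and '11' (about 2_500 counts each).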
| 222
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def SCREAMING_SNAKE_CASE_ ( )-> int:
_lowerCamelCase = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_lowerCamelCase = Dataset.from_dict(snake_case )
return dataset
class __a ( lowerCAmelCase__ ):
def snake_case_ ( self ):
_lowerCamelCase = get_dataset()
_lowerCamelCase = make_duplicate_clusters(a__ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def snake_case_ ( self ):
_lowerCamelCase = get_dataset()
_lowerCamelCase , _lowerCamelCase = deduplicate_dataset(a__ )
self.assertEqual(len(a__ ) , 2 )
print(a__ )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , a__ )
| 222
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowerCamelCase (_a ):
_lowercase = """fnet"""
def __init__( self: Optional[Any],A_: str=3_2000,A_: Optional[Any]=768,A_: str=12,A_: List[str]=3072,A_: Union[str, Any]="gelu_new",A_: Optional[int]=0.1,A_: List[str]=512,A_: Optional[Any]=4,A_: Optional[int]=0.0_2,A_: Optional[Any]=1E-12,A_: int=False,A_: Any=512,A_: Optional[Any]=3,A_: List[Any]=1,A_: Tuple=2,**A_: Any,):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = type_vocab_size
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = use_tpu_fourier_optimizations
__UpperCamelCase = tpu_short_seq_length
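# Example (commented out; the upstream class name FNetConfig is assumed): the
# defaults above match the base checkpoint, so FNetConfig() describes essentially
# the same architecture as FNetConfig.from_pretrained("google/fnet-base").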
| 1
|
from __future__ import annotations
def __lowerCAmelCase ( A_ : list[int] ) -> list[int]: # This function is recursive
__UpperCAmelCase = len(A_ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__UpperCAmelCase = array[0]
__UpperCAmelCase = False
__UpperCAmelCase = 1
__UpperCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
__UpperCAmelCase = True
__UpperCAmelCase = [element for element in array[i:] if element >= array[i]]
__UpperCAmelCase = longest_subsequence(A_ )
if len(A_ ) > len(A_ ):
__UpperCAmelCase = temp_array
else:
i += 1
__UpperCAmelCase = [element for element in array[1:] if element >= pivot]
__UpperCAmelCase = [pivot, *longest_subsequence(A_ )]
if len(A_ ) > len(A_ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
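# Worked example (commented out): for [10, 22, 9, 33, 21, 50, 41, 60, 80] the
# function returns a maximal non-decreasing subsequence of length 6, e.g.
# [10, 22, 33, 41, 60, 80].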
| 221
| 0
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def lowercase_ ( self ):
'''simple docstring'''
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
A__ = "xvjiarui/stable-diffusion-2-inpainting"
A__ , A__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase__ , safety_checker=UpperCamelCase__ )
A__ = "Face of a yellow cat, high resolution, sitting on a park bench"
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = num_samples * [init_image]
A__ = num_samples * [mask_image]
A__ , A__ , A__ = pipeline.prepare_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# shard inputs and rng
A__ = replicate(UpperCamelCase__ )
A__ = jax.random.split(UpperCamelCase__ , jax.device_count() )
A__ = shard(UpperCamelCase__ )
A__ = shard(UpperCamelCase__ )
A__ = shard(UpperCamelCase__ )
A__ = pipeline(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ )
A__ = output.images.reshape(UpperCamelCase__ , 5_12 , 5_12 , 3 )
A__ = images[0, 2_53:2_56, 2_53:2_56, -1]
A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 261
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
lowercase__ : Tuple = """blenderbot-small"""
lowercase__ : List[Any] = ["""past_key_values"""]
lowercase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , UpperCamelCase__=5_02_65 , UpperCamelCase__=5_12 , UpperCamelCase__=8 , UpperCamelCase__=20_48 , UpperCamelCase__=16 , UpperCamelCase__=8 , UpperCamelCase__=20_48 , UpperCamelCase__=16 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="gelu" , UpperCamelCase__=5_12 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=2 , **UpperCamelCase__ , ):
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = use_cache
A__ = encoder_layers
A__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A__ = {0: "batch"}
A__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A__ = {0: "batch", 1: "decoder_sequence"}
A__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A__ , A__ = self.num_layers
for i in range(UpperCamelCase__ ):
A__ = {0: "batch", 2: "past_sequence + sequence"}
A__ = {0: "batch", 2: "past_sequence + sequence"}
else:
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super().outputs
else:
A__ = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A__ , A__ = self.num_layers
for i in range(UpperCamelCase__ ):
A__ = {0: "batch", 2: "past_sequence + sequence"}
A__ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A__ = seq_length if not self.use_past else 1
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
A__ = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ = common_inputs["input_ids"].shape
A__ = common_inputs["decoder_input_ids"].shape[1]
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
        decoder_past_length = decoder_seq_length + 3
        decoder_shape = (
            batch,
            num_decoder_attention_heads,
            decoder_past_length,
            self._config.hidden_size // num_decoder_attention_heads,
        )

        common_inputs["decoder_attention_mask"] = torch.cat(
            [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
        )

        common_inputs["past_key_values"] = []
        # If the number of encoder and decoder layers are present in the model configuration, both are considered
        num_encoder_layers, num_decoder_layers = self.num_layers
        min_num_layers = min(num_encoder_layers, num_decoder_layers)
        max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
        remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

        for _ in range(min_num_layers):
            # Each entry holds (decoder key, decoder value, encoder key, encoder value)
            common_inputs["past_key_values"].append(
                (
                    torch.zeros(decoder_shape),
                    torch.zeros(decoder_shape),
                    torch.zeros(encoder_shape),
                    torch.zeros(encoder_shape),
                )
            )
        # TODO: test this.
        shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
        for _ in range(min_num_layers, max_num_layers):
            common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                num_encoder_layers, _ = self.num_layers
                num_encoder_attention_heads, _ = self.num_attention_heads
                past_shape = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )

                mask_dtype = common_inputs["attention_mask"].dtype
                common_inputs["attention_mask"] = torch.cat(
                    [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                    dim=1,
                )
                common_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
                ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ):
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
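# --- Usage sketch (added for illustration) ---
# A minimal sketch of how a config exposing these methods is typically driven; it assumes
# the surrounding class is analogous to `BartOnnxConfig` from `transformers` (the model
# name, task, and import path below are examples, not taken from this file):
#
#   from transformers import AutoConfig, AutoTokenizer
#   from transformers.models.bart.configuration_bart import BartOnnxConfig
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#   onnx_config = BartOnnxConfig(AutoConfig.from_pretrained("facebook/bart-base"), task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework="pt")
#   print(sorted(dummy_inputs.keys()))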
| 261
| 1
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
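# --- Illustrative hardening (added; not part of the original script) ---
# Google Scholar throttles or blocks automated clients, so a real deployment would likely
# need a browser-like User-Agent, a timeout, and defensive parsing; a minimal sketch:
def get_citation_safe(base_url, params):
    headers = {"User-Agent": "Mozilla/5.0"}
    response = requests.get(base_url, params=params, headers=headers, timeout=10)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    if div is None:  # page layout changed or the lookup returned no result
        return None
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text() if len(anchors) > 2 else None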
| 62
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict[int, str]:
    """
    Returns a mapping from utf-8 bytes to printable unicode strings. The BPE code works on
    unicode strings, so every possible byte needs a character that is not a whitespace or
    control character (those would break the merge logic).
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
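# --- Usage sketch (added for illustration) ---
# Within the transformers package this class is exposed as `LEDTokenizer`; the custom
# `_pad` above is what pads `global_attention_mask` with -1. The checkpoint name is the
# one referenced in the URL map above; the rest of the snippet is an example:
#
#   from transformers import LEDTokenizer
#
#   tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   encoded = tokenizer("Long documents go here.")
#   encoded["global_attention_mask"] = [1] + [0] * (len(encoded["input_ids"]) - 1)
#   padded = tokenizer.pad(encoded, padding="max_length", max_length=16)
#   print(padded["global_attention_mask"])  # -1 marks padding, 0 local attention, 1 global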
| 462
| 0
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
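    # Example invocation (illustrative; the script file name and checkpoint path are
    # placeholders, only the two positional arguments come from the parser above):
    #   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-converted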
| 669
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
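# --- Usage sketch (added for illustration) ---
# The fast tokenizer is a drop-in replacement for the slow `LxmertTokenizer`; the sentence
# pair below exercises `create_token_type_ids_from_sequences` (0s for the first segment,
# 1s for the second):
#
#   from transformers import LxmertTokenizerFast
#
#   tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tok("what color is the cat?", "a cat sits on the mat")
#   print(enc["token_type_ids"])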
| 669
| 1
|
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
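    # Example invocation (illustrative; the script file name and paths are placeholders,
    # the four flags come from the parser above):
    #   python convert_unispeech_sat_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_downstream.ckpt \
    #       --model_dump_path ./converted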
| 488
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
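# Note (added): these fast pipeline tests are normally driven with pytest; the path and
# filter below are illustrative, not taken from this file:
#   pytest tests/pipelines/deepfloyd_if -k "Img2ImgSuperResolution"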
| 488
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 706
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
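# --- Usage sketch (added for illustration) ---
# Because this module lives inside `transformers` (note the relative imports), the usual
# entry point is the public API; a minimal classification forward pass, with random pixel
# values standing in for a real image, looks like:
#
#   import torch
#   from transformers import RegNetForImageClassification
#
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040").eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 224, 224)).logits
#   print(logits.shape)  # torch.Size([1, 1000])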
| 30
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__a = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__a = 1_000
__a = """huggingface/label-files"""
__a = """imagenet-1k-id2label.json"""
__a = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
__a = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__a = [3, 3, 6, 4]
__a = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__a = [3, 3, 9, 6]
__a = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__a = [4, 3, 10, 5]
__a = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__a = [4, 4, 12, 6]
__a = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
__a = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )
else:
__a = torch.load(lowerCAmelCase__ , map_location="""cpu""" )
__a = checkpoint
__a = create_rename_keys(lowerCAmelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# load HuggingFace model
__a = SwiftFormerForImageClassification(lowerCAmelCase__ ).eval()
hf_model.load_state_dict(lowerCAmelCase__ )
# prepare test inputs
__a = prepare_img()
__a = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
__a = processor(images=lowerCAmelCase__ , return_tensors="""pt""" )
# compare outputs from both models
__a = get_expected_output(lowerCAmelCase__ )
__a = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , lowerCAmelCase__ , atol=1E-3 )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
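# Example invocation added for clarity (the script filename and the checkpoint
# path are assumptions, not from the original file):
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./swiftformer_xs.pth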
| 99
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset: Dataset, name: str, con, batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
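# Minimal usage sketch added for clarity (the database, table, and column names
# are illustrative assumptions):
#
#     import sqlite3
#     from datasets import Dataset
#
#     con = sqlite3.connect("example.db")
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     written = SqlDatasetWriter(ds, "my_table", con, batch_size=1_000).write()
#     con.close()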
| 407
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
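# Shape sketch added for clarity (the sizes are illustrative assumptions): the
# model treats its input as (batch * num_frames, channels, height, width) and
# attends across the frame axis, returning a tensor of the same shape.
#
#     model = TransformerTemporalModel(in_channels=32, num_attention_heads=4, attention_head_dim=8)
#     hidden_states = torch.randn(2 * 4, 32, 8, 8)  # batch of 2, 4 frames each
#     out = model(hidden_states, num_frames=4).sample  # same shape as the input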
| 703
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
A__: List[str] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 506
| 0
|
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
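if __name__ == "__main__":
    # Round-trip self-check added for illustration; note that the 5x5 square has
    # no "j", so encode() first maps "j" to "i".
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    assert cipher.decode(encoded) == "testmessage"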
| 285
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
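# Usage sketch added for clarity (the checkpoint name is an assumption; any
# image-to-text model such as a GIT or BLIP checkpoint should work):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#     captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"generated_text": "..."}]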
| 285
| 1
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 721
|
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
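# With the lazy module in place, the usual top-level import pattern works, e.g.
# (illustrative):
#
#     from transformers.models.mobilenet_v2 import MobileNetV2Config
#     config = MobileNetV2Config()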
| 505
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 505
| 1
|
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
| 702
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 497
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1_024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 68
|
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling so that entangled lists
    # (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
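if __name__ == "__main__":
    # Worked example added for illustration: 10 shards over at most 3 jobs are
    # split 4/3/3, with the remainder going to the earliest groups.
    assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]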
| 245
| 0
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 137
| 0
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 69
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 457
| 0
|
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 603
|
'''simple docstring'''
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__snake_case = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
__snake_case = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 603
| 1
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
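# Usage sketch added for clarity: the defaults above correspond to the
# EfficientNet-B7 sizing referenced in the archive map.
#
#     config = EfficientNetConfig()
#     assert config.image_size == 600 and config.hidden_dim == 2_560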
| 403
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 22
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 708
|
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203
| 0
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1024,
'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
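# Usage sketch added for clarity (the checkpoint is one of the vocabularies
# mapped above):
#
#     from transformers import EsmTokenizer
#
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     ids = tokenizer.convert_tokens_to_ids(["M", "K", "T"])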
| 206
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(result))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
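
# Sanity check (illustrative, not part of the metric) for the unbiased
# estimator above: pass@k = 1 - C(n-c, k) / C(n, k), which for k=1 reduces to
# the plain fraction of passing samples, c/n.
assert abs(estimate_pass_at_k([5], [2], 1)[0] - 2 / 5) < 1e-12  # k=1 -> c/n
assert estimate_pass_at_k([5], [5], 3)[0] == 1.0                # all candidates pass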
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
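
# Quick check of largest_product on a tiny hypothetical 4x4 grid (not the
# Project Euler grid.txt used above): the second row gives 5*6*7*8 = 1680,
# which beats every column and both diagonals.
_demo_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 1, 2, 3],
    [4, 5, 6, 7],
]
assert largest_product(_demo_grid) == 1680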
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
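
# A minimal sketch (hypothetical helper, not part of the conversion script) of
# the "*"-wildcard renaming that recursively_load_weights performs above: the
# layer index is recovered from the fairseq key and substituted into the
# HF key template.
def _expand_mapped_key(name: str, key: str, mapped_key: str) -> str:
    if "*" in mapped_key:
        layer_index = name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key


assert (
    _expand_mapped_key(
        "encoder.layers.3.self_attn.linear_q.weight",
        "self_attn.linear_q",
        "encoder.layers.*.self_attn.linear_q",
    )
    == "encoder.layers.3.self_attn.linear_q"
)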
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
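
# Standalone sketch of the fused key/value split performed by read_in_k_v
# above: the original checkpoint stores one (2*hidden, hidden) "kv" matrix,
# and the first `hidden` rows become the key projection while the rest become
# the value projection. The shapes here are made up for the demo.
import torch as _torch

_hidden = 4
_kv_weight = _torch.randn(2 * _hidden, _hidden)
_kv_bias = _torch.randn(2 * _hidden)
_key_weight, _value_weight = _kv_weight[:_hidden, :], _kv_weight[_hidden:, :]
_key_bias, _value_bias = _kv_bias[:_hidden], _kv_bias[_hidden:]
assert _key_weight.shape == _value_weight.shape == (_hidden, _hidden)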
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(F'{solution() = }')
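
# Sanity check (not part of the original solution): the first arrangement
# with more than 21 discs in total is 85 blue out of 120, where
# P(both blue) = 85/120 * 84/119 is exactly 1/2.
assert solution(21) == 85
assert 2 * 85 * 84 == 120 * 119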
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
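
# A minimal sketch (hypothetical command, not part of transformers-cli) of the
# register_subcommand pattern used above: each command attaches its own
# subparser and sets `func` to a factory the dispatcher calls with the parsed
# args, so `args.func(args).run()` works uniformly for every command.
class _EchoCommand:
    @staticmethod
    def register_subcommand(commands_parser):
        echo_parser = commands_parser.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: _EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)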
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
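
# A small standalone note on the seeding pattern used throughout these tests:
# a torch.Generator pinned to a device and seed makes pipeline outputs
# reproducible, which is what lets the expected_slice assertions above pass.
import torch as _torch

_g1 = _torch.Generator(device="cpu").manual_seed(0)
_g2 = _torch.Generator(device="cpu").manual_seed(0)
assert _torch.equal(_torch.randn(4, generator=_g1), _torch.randn(4, generator=_g2))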
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
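
# A minimal standalone sketch of the composition pattern above: the parent
# config owns typed sub-configs and its to_dict() recursively serializes them
# (hypothetical classes, not the transformers API).
import copy as _copy


class _SubConfig:
    def __init__(self, hidden_size=1408):
        self.hidden_size = hidden_size

    def to_dict(self):
        return _copy.deepcopy(self.__dict__)


class _ParentConfig:
    def __init__(self, vision_config=None):
        self.vision_config = _SubConfig(**(vision_config or {}))

    def to_dict(self):
        output = _copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        return output


assert _ParentConfig().to_dict()["vision_config"]["hidden_size"] == 1408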
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id)
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
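
# Standalone sketch of the verification idiom used above: after a conversion,
# compare a small slice of the model outputs against hard-coded reference
# values within a tolerance rather than the full tensor (the values here are
# made up for the demo).
import torch as _torch

_logits_slice = _torch.tensor([[-8.8110, -8.8110, -8.6521]])
_expected = _torch.tensor([[-8.8110, -8.8110, -8.6521]])
assert _torch.allclose(_logits_slice, _expected, atol=1e-4)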
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class a ( UpperCAmelCase__ ):
def __init__( self : int , lowerCAmelCase : CLIPSegForImageSegmentation , lowerCAmelCase : CLIPSegProcessor , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : StableDiffusionSafetyChecker , lowerCAmelCase : CLIPImageProcessor , ) -> Any:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE_: Optional[int] =(
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =dict(scheduler.config )
SCREAMING_SNAKE_CASE_: int =1
SCREAMING_SNAKE_CASE_: Any =FrozenDict(lowerCAmelCase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE_: int =(
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =dict(scheduler.config )
SCREAMING_SNAKE_CASE_: Any =True
SCREAMING_SNAKE_CASE_: List[Any] =FrozenDict(lowerCAmelCase )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=lowerCAmelCase , segmentation_processor=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE_: Dict =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE_: List[Any] =torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Tuple , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase : str , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : str , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
SCREAMING_SNAKE_CASE_: List[str] =self.segmentation_model(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE_: Any =self.numpy_to_pil(lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , )
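# Hedged usage sketch (editor addition; the checkpoint and custom_pipeline id below are
# assumptions based on typical diffusers community-pipeline conventions, not guaranteed
# by this file). The pipeline first segments the region named by `text` with CLIPSeg and
# then inpaints the masked area, so a call might look like:
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=segmentation_model,
#       segmentation_processor=segmentation_processor,
#   )
#   result = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]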
| 36
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
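    # Worked example (editor addition): with angle = 90 and radius = 10 the formula above
    # evaluates to 2 * pi * 10 * (90 / 360) = 5 * pi, about 15.70796, i.e. a quarter of
    # the full circumference, which is what the call above prints.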
| 36
| 1
|
from math import isqrt
def UpperCAmelCase__( __UpperCAmelCase : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(__UpperCAmelCase ) + 1 ) )
def UpperCAmelCase__( __UpperCAmelCase : int = 10**6 ):
__snake_case : Dict = 0
__snake_case : int = 1
__snake_case : Optional[Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(__UpperCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
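# Derivation note (editor addition): consecutive cube differences satisfy
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, so the first candidate is 7 (n = 1) and each
# successive candidate grows by 6 * (n + 1), which is exactly the
# `prime_candidate += 6 * cube_index` update in the loop above. The first few candidates
# are therefore 7, 19, 37, 61, 91, ...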
| 576
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __snake_case ( lowercase : np.ndarray , lowercase : float ):
    # Apply the Gaussian function to each element of the matrix.
snake_case_ = math.sqrt(lowercase )
snake_case_ = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __snake_case ( lowercase : np.ndarray , lowercase : int , lowercase : int , lowercase : int ):
snake_case_ = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __snake_case ( lowercase : int , lowercase : float ):
    # Create a Gaussian kernel of the given dimension.
snake_case_ = np.zeros((kernel_size, kernel_size) )
for i in range(0 , lowercase ):
for j in range(0 , lowercase ):
snake_case_ = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(lowercase , lowercase )
def __snake_case ( lowercase : np.ndarray , lowercase : float , lowercase : float , lowercase : int , ):
snake_case_ = np.zeros(img.shape )
snake_case_ = get_gauss_kernel(lowercase , lowercase )
snake_case_ , snake_case_ = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
snake_case_ = get_slice(lowercase , lowercase , lowercase , lowercase )
snake_case_ = img_s - img_s[kernel_size // 2, kernel_size // 2]
snake_case_ = vec_gaussian(lowercase , lowercase )
snake_case_ = np.multiply(lowercase , lowercase )
snake_case_ = np.multiply(lowercase , lowercase )
snake_case_ = np.sum(lowercase ) / np.sum(lowercase )
snake_case_ = val
return imga
def __snake_case ( lowercase : list ):
snake_case_ = args[1] if args[1:] else "../image_data/lena.jpg"
snake_case_ = float(args[2] ) if args[2:] else 1.0
snake_case_ = float(args[3] ) if args[3:] else 1.0
if args[4:]:
snake_case_ = int(args[4] )
snake_case_ = kernel_size + abs(kernel_size % 2 - 1 )
else:
snake_case_ = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase__ , lowercase__ , lowercase__ , lowercase__ = parse_args(sys.argv)
lowercase__ = cva.imread(filename, 0)
cva.imshow('''input image''', img)
lowercase__ = img / 2_55
lowercase__ = out.astype('''float32''')
lowercase__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase__ = out * 2_55
lowercase__ = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
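# Hedged usage note (editor addition; the script filename is an assumption and the sample
# path mirrors the default parsed above):
#
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
#
# Positional arguments are filename, spatial variance, intensity variance and kernel
# size; the parser above rounds an even kernel size up to the nearest odd number and
# falls back to the defaults for any argument that is omitted.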
| 508
| 0
|
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCamelCase = logging.get_logger(__name__)
@dataclass
class A__ :
lowercase = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
lowercase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowercase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase = field(
default=_A , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.task_name.lower()
class A__ ( _A ):
lowercase = "train"
lowercase = "dev"
lowercase = "test"
class A__ ( _A ):
lowercase = 42
lowercase = 42
lowercase = 42
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = Split.train , UpperCamelCase__ = None , ) -> Any:
'''simple docstring'''
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , UpperCamelCase__ , )
A_ = args
A_ = glue_processors[args.task_name]()
A_ = glue_output_modes[args.task_name]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
try:
A_ = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
A_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
A_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ = cached_features_file + """.lock"""
with FileLock(UpperCamelCase__ ):
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
A_ = time.time()
A_ = torch.load(UpperCamelCase__ )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
A_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A_ = self.processor.get_test_examples(args.data_dir )
else:
A_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A_ = examples[:limit_length]
A_ = glue_convert_examples_to_features(
UpperCamelCase__ , UpperCamelCase__ , max_length=args.max_seq_length , label_list=UpperCamelCase__ , output_mode=self.output_mode , )
A_ = time.time()
torch.save(self.features , UpperCamelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> str:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCamelCase__ ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return self.label_list
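# Hedged usage sketch (editor addition, using the upstream names GlueDataTrainingArguments,
# GlueDataset and Split for readability; the checkpoint and data_dir are assumptions):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#
# The first construction tokenizes the examples and caches the features on disk; later
# constructions with the same arguments load the cache unless overwrite_cache is set.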
| 710
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
    # return 1 only if every character in the word is a Chinese character, e.g. '身高' or '神'; words like '180' return 0
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save the positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
                # save the Chinese token's position
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
    # For Chinese (Ro)BERT models, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
    A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()]  # skip empty lines and paragraph delimiters such as '\u2029'
    A_ = LTP(args.ltp )  # faster on a GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
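# Hedged usage note (editor addition; the script filename is an assumption, the paths are
# the defaults declared above):
#
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt
#
# The output file contains, per input line, one JSON list of token positions that should
# receive the "##" whole-word-mask treatment during BERT pretraining.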
| 667
| 0
|
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
_A = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_A = ''
_A = ''
    # append each character followed by "|" for all but the last character
for i in input_string[: len(__snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # store the start and end of the previously found furthest-ending palindromic
    # substring
_A , _A = 0, 0
# length[i] shows the length of palindromic substring with center i
_A = [1 for i in range(len(__snake_case ) )]
    # for each centre in new_input_string, find the corresponding palindrome length
_A = 0
for j in range(len(__snake_case ) ):
_A = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_A = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_A = j - k + 1 # noqa: E741
_A = j + k - 1
# update max_length and start position
if max_length < length[j]:
_A = length[j]
_A = j
# create that string
_A = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
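# Worked example (editor addition): for the input "abaxabaxabb" the interleaved string is
# "a|b|a|x|a|b|a|x|a|b|b"; expanding around each centre while reusing mirrored lengths
# keeps the scan linear, and the longest palindromic substring recovered after stripping
# the "|" separators is "baxabaxab".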
| 107
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : str = """vivit"""
def __init__( self , lowerCamelCase_=2_2_4 , lowerCamelCase_=3_2 , lowerCamelCase_=[2, 1_6, 1_6] , lowerCamelCase_=3 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu_fast" , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-06 , lowerCamelCase_=True , **lowerCamelCase_ , ) -> int:
_a : Tuple = hidden_size
_a : Dict = num_hidden_layers
_a : List[str] = num_attention_heads
_a : int = intermediate_size
_a : Optional[int] = hidden_act
_a : Dict = hidden_dropout_prob
_a : List[str] = attention_probs_dropout_prob
_a : List[str] = initializer_range
_a : List[str] = layer_norm_eps
_a : Any = image_size
_a : Optional[Any] = num_frames
_a : Dict = tubelet_size
_a : Union[str, Any] = num_channels
_a : Optional[int] = qkv_bias
super().__init__(**lowerCamelCase_ )
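# Hedged usage sketch (editor addition, using the upstream class name VivitConfig; the
# overridden values are illustrative):
#
#   config = VivitConfig(num_frames=16, image_size=224)
#
# The defaults above mirror the google/vivit-b-16x2-kinetics400 checkpoint: 32 frames of
# 224x224 video split into 2x16x16 tubelets, a 12-layer / 768-dim encoder and gelu_fast
# activations.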
| 120
| 0
|
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(a_ ):
for j in range(a_ ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = [[float("inf" ) for _ in range(a_ )] for _ in range(a_ )]
for i in range(a_ ):
for j in range(a_ ):
SCREAMING_SNAKE_CASE__ : Any = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(a_ ):
# looping through rows of graph array
for i in range(a_ ):
# looping through columns of graph array
for j in range(a_ ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
SCREAMING_SNAKE_CASE__ : Any = dist[i][k] + dist[k][j]
_print_dist(a_ , a_ )
return dist, v
if __name__ == "__main__":
_lowerCamelCase : List[str] = int(input('''Enter number of vertices: '''))
_lowerCamelCase : Tuple = int(input('''Enter number of edges: '''))
_lowerCamelCase : Union[str, Any] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
_lowerCamelCase : List[str] = 0.0
# src and dst are indices that must lie within the v x v graph matrix;
# out-of-range values will raise an IndexError
for i in range(e):
print('''\nEdge ''', i + 1)
_lowerCamelCase : int = int(input('''Enter source:'''))
_lowerCamelCase : Dict = int(input('''Enter destination:'''))
_lowerCamelCase : Any = float(input('''Enter weight:'''))
_lowerCamelCase : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs above
# 0 INF INF
# INF 0 2
# INF 1 0
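# Complexity note (editor addition): the triple loop above relaxes every pair (i, j)
# through each intermediate vertex k, so the run time is O(v**3) with O(v**2) memory.
# For the 3-vertex example the input matrix (with INF = float("inf")) is
#
#   [[0.0, INF, INF],
#    [INF, 0.0, 2.0],
#    [INF, 1.0, 0.0]]
#
# and relaxation improves nothing here, so the direct edges survive unchanged, matching
# the expected output printed above.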
| 713
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "data2vec-audio"
def __init__( self : List[str], _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : str=7_6_8, _UpperCAmelCase : Dict=1_2, _UpperCAmelCase : List[Any]=1_2, _UpperCAmelCase : Dict=3_0_7_2, _UpperCAmelCase : str="gelu", _UpperCAmelCase : Union[str, Any]=0.1, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Tuple=0.0, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : Any=0.02, _UpperCAmelCase : Tuple=1E-5, _UpperCAmelCase : Union[str, Any]="gelu", _UpperCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2), _UpperCAmelCase : str=(5, 2, 2, 2, 2, 2, 2), _UpperCAmelCase : int=(1_0, 3, 3, 3, 3, 2, 2), _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : List[str]=1_6, _UpperCAmelCase : Any=1_9, _UpperCAmelCase : List[Any]=5, _UpperCAmelCase : Dict=0.05, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : Optional[Any]=0.0, _UpperCAmelCase : List[Any]=1_0, _UpperCAmelCase : Optional[Any]=0, _UpperCAmelCase : Optional[Any]="sum", _UpperCAmelCase : str=False, _UpperCAmelCase : Any=False, _UpperCAmelCase : Optional[int]=2_5_6, _UpperCAmelCase : Optional[int]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0), _UpperCAmelCase : int=(5, 3, 3, 1, 1), _UpperCAmelCase : Optional[int]=(1, 2, 3, 1, 1), _UpperCAmelCase : Optional[Any]=5_1_2, _UpperCAmelCase : int=0, _UpperCAmelCase : Tuple=1, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : List[str]=False, _UpperCAmelCase : Dict=3, _UpperCAmelCase : Any=2, _UpperCAmelCase : Dict=3, _UpperCAmelCase : Dict=None, **_UpperCAmelCase : Any, ) -> Any:
"""simple docstring"""
super().__init__(**_UpperCAmelCase, pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract_activation
SCREAMING_SNAKE_CASE__ : Optional[int] = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = conv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ : Tuple = conv_pos_kernel_size
SCREAMING_SNAKE_CASE__ : List[str] = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout
SCREAMING_SNAKE_CASE__ : int = attention_dropout
SCREAMING_SNAKE_CASE__ : Dict = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_proj_dropout
SCREAMING_SNAKE_CASE__ : List[str] = final_dropout
SCREAMING_SNAKE_CASE__ : Tuple = layerdrop
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ : int = mask_time_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask_time_length
SCREAMING_SNAKE_CASE__ : List[str] = mask_time_min_masks
SCREAMING_SNAKE_CASE__ : Tuple = mask_feature_prob
SCREAMING_SNAKE_CASE__ : Dict = mask_feature_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE__ : Optional[int] = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ : List[Any] = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE__ : int = add_adapter
SCREAMING_SNAKE_CASE__ : Dict = adapter_kernel_size
SCREAMING_SNAKE_CASE__ : Optional[int] = adapter_stride
SCREAMING_SNAKE_CASE__ : Dict = num_adapter_layers
SCREAMING_SNAKE_CASE__ : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : Any = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = xvector_output_dim
@property
def A_ ( self : List[str] ) -> str:
"""simple docstring"""
return math.prod(self.conv_stride )
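# Hedged usage sketch (editor addition, using the upstream class name Data2VecAudioConfig;
# the vocab size is an assumption):
#
#   config = Data2VecAudioConfig(vocab_size=32)
#
# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property at the end of the
# class returns math.prod((5, 2, 2, 2, 2, 2, 2)) = 320, i.e. the feature encoder
# downsamples the raw waveform by a factor of 320.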
| 157
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = SwinConfig()
__UpperCAmelCase : int = swin_name.split("_" )
__UpperCAmelCase : List[Any] = name_split[1]
__UpperCAmelCase : List[str] = int(name_split[4] )
__UpperCAmelCase : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
__UpperCAmelCase : Optional[int] = 96
__UpperCAmelCase : int = (2, 2, 6, 2)
__UpperCAmelCase : List[Any] = (3, 6, 12, 24)
elif model_size == "small":
__UpperCAmelCase : List[str] = 96
__UpperCAmelCase : Optional[int] = (2, 2, 18, 2)
__UpperCAmelCase : Tuple = (3, 6, 12, 24)
elif model_size == "base":
__UpperCAmelCase : Optional[int] = 128
__UpperCAmelCase : Optional[Any] = (2, 2, 18, 2)
__UpperCAmelCase : str = (4, 8, 16, 32)
else:
__UpperCAmelCase : Dict = 192
__UpperCAmelCase : int = (2, 2, 18, 2)
__UpperCAmelCase : Dict = (6, 12, 24, 48)
if "in22k" in swin_name:
__UpperCAmelCase : Dict = 2_1841
else:
__UpperCAmelCase : List[str] = 1000
__UpperCAmelCase : str = '''huggingface/label-files'''
__UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : Any = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : Any = img_size
__UpperCAmelCase : Optional[Any] = num_classes
__UpperCAmelCase : Optional[int] = embed_dim
__UpperCAmelCase : List[Any] = depths
__UpperCAmelCase : Optional[int] = num_heads
__UpperCAmelCase : int = window_size
return config
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
if "patch_embed.proj" in name:
__UpperCAmelCase : List[Any] = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__UpperCAmelCase : Any = name.replace("patch_embed.norm", "embeddings.norm" )
if "layers" in name:
__UpperCAmelCase : Dict = '''encoder.''' + name
if "attn.proj" in name:
__UpperCAmelCase : List[str] = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name:
__UpperCAmelCase : List[Any] = name.replace("attn", "attention.self" )
if "norm1" in name:
__UpperCAmelCase : int = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
__UpperCAmelCase : str = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
__UpperCAmelCase : Dict = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
__UpperCAmelCase : Dict = name.replace("mlp.fc2", "output.dense" )
if name == "norm.weight":
__UpperCAmelCase : List[Any] = '''layernorm.weight'''
if name == "norm.bias":
__UpperCAmelCase : Union[str, Any] = '''layernorm.bias'''
if "head" in name:
__UpperCAmelCase : Optional[Any] = name.replace("head", "classifier" )
else:
__UpperCAmelCase : Optional[Any] = '''swin.''' + name
return name
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Union[str, Any] = orig_state_dict.pop(__lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
__UpperCAmelCase : List[Any] = key.split("." )
__UpperCAmelCase : str = int(key_split[1] )
__UpperCAmelCase : Any = int(key_split[3] )
__UpperCAmelCase : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__UpperCAmelCase : Optional[int] = val[:dim, :]
__UpperCAmelCase : Optional[int] = val[
dim : dim * 2, :
]
__UpperCAmelCase : Any = val[-dim:, :]
else:
__UpperCAmelCase : Optional[Any] = val[
:dim
]
__UpperCAmelCase : str = val[
dim : dim * 2
]
__UpperCAmelCase : Optional[int] = val[
-dim:
]
else:
__UpperCAmelCase : Any = val
return orig_state_dict
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : Optional[Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
__UpperCAmelCase : Any = get_swin_config(__lowerCamelCase )
__UpperCAmelCase : List[str] = SwinForImageClassification(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = convert_state_dict(timm_model.state_dict(), __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
__UpperCAmelCase : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-" ) ) )
__UpperCAmelCase : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
__UpperCAmelCase : int = image_processor(images=__lowerCamelCase, return_tensors="pt" )
__UpperCAmelCase : Any = timm_model(inputs["pixel_values"] )
__UpperCAmelCase : Tuple = model(**__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase, __lowerCamelCase, atol=1e-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_snake_case = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
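# Hedged usage note (editor addition; the script filename is an assumption):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224
#
# The script runs the COCO sample image through both the timm model and the converted
# HF model and asserts the logits agree to atol=1e-3 before saving anything.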
| 382
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 560
| 0
|
from __future__ import annotations
from typing import Any
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
pass
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCAmelCase_ ):
lowerCAmelCase = data
lowerCAmelCase = None
def __iter__( self ):
lowerCAmelCase = self
lowerCAmelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCAmelCase_ )
yield node.data
lowerCAmelCase = node.next_node
@property
def __snake_case ( self ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
UpperCAmelCase_ =Node(1)
UpperCAmelCase_ =Node(2)
UpperCAmelCase_ =Node(3)
UpperCAmelCase_ =Node(4)
print(root_node.has_loop) # False
UpperCAmelCase_ =root_node.next_node
print(root_node.has_loop) # True
UpperCAmelCase_ =Node(5)
UpperCAmelCase_ =Node(6)
UpperCAmelCase_ =Node(5)
UpperCAmelCase_ =Node(6)
print(root_node.has_loop) # False
UpperCAmelCase_ =Node(1)
print(root_node.has_loop) # False
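# Design note (editor addition): the iterator-based check above keeps a `visited` list,
# which costs O(n) extra memory and O(n**2) time because of the membership scan. A
# standard constant-memory alternative is Floyd's tortoise-and-hare cycle detection; a
# minimal sketch (hypothetical standalone helper, assuming the same `next_node` field):
def _has_loop_floyd(head) -> bool:
    # Move one pointer a node at a time and another two nodes at a time; the two
    # pointers can only ever meet if the list contains a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False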
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Tuple =IFInpaintingSuperResolutionPipeline
__a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __snake_case ( self ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 33
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE_ ( snake_case ):
__a : List[str] = ['''image_processor''', '''tokenizer''']
__a : List[str] = '''FlavaImageProcessor'''
__a : str = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , lowercase=None , lowercase=None , **lowercase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase , lowercase )
__SCREAMING_SNAKE_CASE : Dict = self.image_processor
def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = False , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ) -> List[str]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
if images is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(
lowercase , return_image_mask=lowercase , return_codebook_pixels=lowercase , return_tensors=lowercase , **lowercase , )
if text is not None and images is not None:
encoding.update(lowercase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def _snake_case ( self , *lowercase , **lowercase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _snake_case ( self , *lowercase , **lowercase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _snake_case ( self ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
__SCREAMING_SNAKE_CASE : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _snake_case ( self ) -> int:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , )
return self.image_processor_class
@property
def _snake_case ( self ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase , )
return self.image_processor
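# Hedged usage sketch (editor addition, using the upstream name FlavaProcessor; the
# checkpoint is an assumption):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#
# When both modalities are passed, the image features are merged into the text encoding,
# as implemented by the `encoding.update(...)` branch in __call__ above.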
| 158
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Any = seq_length
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
__SCREAMING_SNAKE_CASE : Optional[int] = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : str = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = type_vocab_size
__SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : str = num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = scope
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vocab_size - 1
def _snake_case ( self ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowercase , token_type_ids=lowercase )
__SCREAMING_SNAKE_CASE : str = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
__SCREAMING_SNAKE_CASE : Dict = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[Any] = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) : Union[str, Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , snake_case , unittest.TestCase ):
__a : str = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__a : Dict = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__a : Dict = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
__SCREAMING_SNAKE_CASE : Any = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : List[str] = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = OpenAIGPTModelTester(self )
__SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowercase , n_embd=3_7 )
def _snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def _snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def _snake_case ( self ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def _snake_case ( self ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def _snake_case ( self ) -> str:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase )
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowercase ) # the president is
__SCREAMING_SNAKE_CASE : Any = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__SCREAMING_SNAKE_CASE : List[Any] = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 158
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Assert that the created tokens are the same as the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """Tests encode/decode round-trips on samples from the XNLI dataset."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # Overridden: just check that the pretrained vocab file map is populated.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
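
# A short usage sketch of the tokenizer under test, outside the unittest harness.
# The checkpoint name comes from setUp() above; the rest is the standard fast
# tokenizer API, but treat the snippet as illustrative.
from transformers import BloomTokenizerFast

bloom_tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = bloom_tokenizer("The quick brown fox")["input_ids"]
print(ids)
print(bloom_tokenizer.decode(ids))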
| 700
|
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden :: [len*bsz x d_proj], labels :: [len*bsz]

        Returns per-token negative log-likelihoods when `labels` is given,
        otherwise log-probabilities over the full vocabulary.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster

                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
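
# A minimal forward-pass sketch for the adaptive softmax above, with assumed toy
# sizes (vocab 1000, cutoffs [100, 500]); shapes follow the forward() contract:
# `hidden` is (bsz, seq_len, d_proj) and `labels` is (bsz, seq_len).
import torch

crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
hidden = torch.randn(2, 8, 32)
labels = torch.randint(0, 1000, (2, 8))
nll = crit(hidden, labels)  # flattened per-token negative log-likelihoods
print(nll.shape)  # torch.Size([14]) after the one-step label shift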
| 350
| 0
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # copy the original weights over, position by position
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)

    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
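
# Example invocation (the script filename is hypothetical; the flags are the
# ones defined by the argument parser above):
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub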
| 70
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
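
# A minimal parameter-initialization sketch for the Flax ControlNet above,
# assuming the class is available as the public diffusers export; the tiny
# sample_size is an assumption to keep the example cheap.
import jax

controlnet = FlaxControlNetModel(sample_size=16)
params = controlnet.init_weights(jax.random.PRNGKey(0))
print(jax.tree_util.tree_map(lambda x: x.shape, params)["conv_in"])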
| 645
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
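
# With this conftest active, the shared reporting flag registered by
# pytest_addoption_shared becomes available; an illustrative invocation:
#
#   pytest tests/ --make-reports=run_tests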
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
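
# A small sketch of what the lazy module buys: importing the package is cheap,
# and the real submodule import only happens on first attribute access.
import transformers.onnx

onnx_config_cls = transformers.onnx.OnnxConfig  # triggers the import of .config
print(onnx_config_cls.__module__)  # transformers.onnx.config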
| 370
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
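
# A sketch of how a concrete test class would plug into the mixin above; the
# hooks it reads are `feature_extraction_class` and `feat_extract_dict`. The
# choice of WhisperFeatureExtractor and its kwargs is illustrative only.
import unittest

from transformers import WhisperFeatureExtractor


class WhisperSavingTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor
    feat_extract_dict = {"feature_size": 80, "hop_length": 160, "chunk_length": 30}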
| 76
|
import unittest
from transformers import DonutProcessor
DONUT_CHECKPOINT = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_CHECKPOINT)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
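
# The same conversion outside the test harness, as a quick sketch:
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
print(processor.token2json("<s_name>John Doe</s_name>"))  # {'name': 'John Doe'}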
| 327
| 0
|
'''simple docstring'''
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Count the hollow square laminae that can be formed with up to `limit` tiles."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
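
# Sanity check from the Project Euler 173 statement: using up to one hundred
# tiles, exactly forty-one different square laminae can be formed.
assert solution(100) == 41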
| 605
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 605
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class Queue(Generic[_T]):
    """A FIFO queue implemented with two stacks (amortized O(1) put/get)."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Move elements only when the output stack is exhausted, so each element
        # is moved at most once: this is what makes the operations amortized O(1).
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
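
# A quick FIFO sanity check for the two-stack queue above, doctest-style:
#
#   >>> q = Queue([1, 2, 3])
#   >>> q.put(4)
#   >>> q.get(), q.get()
#   (1, 2)
#   >>> len(q)
#   2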
| 519
|
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (0 for n == 1 or non-integer input)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci term with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
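
# Worked example: by this file's indexing, the first Fibonacci term containing
# three digits is F(12) = 144, so:
assert fibonacci_digits_index(3) == 12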
| 519
| 1
|
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if n is bouncy (its digits are neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
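
# Figures from the Project Euler 112 statement, usable as quick sanity checks:
# 155349 is bouncy, the 50% proportion is first reached at 538, and 90% at 21780.
assert check_bouncy(155349)
assert solution(50) == 538
assert solution(90) == 21780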
| 275
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 275
| 1
|
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
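
# Another example: only words longer than four characters are reversed in place.
assert reverse_long_words("Hello world") == "olleH dlrow"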
| 77
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
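
# Worked check: 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0, so 4150
# is one of the numbers the sum in solution() picks up.
assert digits_fifth_powers_sum(4150) == 4150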
| 254
| 0
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
|
"""simple docstring"""
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    input_str = input_str.replace(" ", "")  # Replace all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
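
# Quick checks for the three variants ("Sphinx of black quartz, judge my vow"
# is a classic pangram):
assert is_pangram()
assert not is_pangram_faster("Hello world")
assert is_pangram_fastest("Sphinx of black quartz, judge my vow")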
| 261
| 0