code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Collect every tracked file path; the repository lint below rejects paths that
# would break on case-insensitive filesystems or in generated markdown links.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Paths containing uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Paths containing spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Paths containing hyphens.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Files sitting at the repository root (no directory separator).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit non-zero (with the offender count) so CI fails when any rule is violated.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 101
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Mapping from submodule name to the public names it exports; consumed by
# `_LazyModule` below so heavy submodules are only imported on first access.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

# Each optional backend only contributes its exports when the dependency is
# installed; a missing backend is silently skipped.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase(unittest.TestCase):
    """Fast CPU test for `ScoreSdeVePipeline` (upstream: ScoreSdeVePipelineFastTests).

    The obfuscated dump replaced every call argument with the undefined name
    `a_`; the values below are restored from the standard diffusers pipeline
    test pattern (device from testing_utils, seeded generator, return_dict=False
    for the tuple path).
    """

    @property
    def dummy_uncond_unet(self):
        """Small, deterministically initialized UNet (seeded before construction)."""
        torch.manual_seed(0)
        # NOTE(review): `UNetaDModel` is the name imported above; upstream this is
        # presumably `UNet2DModel` — confirm against the real diffusers import.
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """Pipeline output and its tuple (return_dict=False) form must agree."""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration test for `ScoreSdeVePipeline` against a hub checkpoint.

    NOTE(review): this class reuses the same (obfuscated) name as the fast-test
    class earlier in the file and therefore shadows it at module level; upstream
    they had distinct names — restore both if possible.
    """

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 102
|
"""simple docstring"""
def solution(n=600851475143):
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Uses trial division, stripping each prime factor completely before moving
    to the next candidate, so ``i`` only ever lands on primes.

    Args:
        n: A positive integer or anything castable to int. Defaults to the
           Project Euler input 600851475143.

    Returns:
        The largest prime factor of ``n`` as an int.

    Raises:
        TypeError: if ``n`` cannot be cast to int.
        ValueError: if ``n`` is not at least one.

    >>> solution(13195)
    29
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.") from None
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of n (necessarily prime, since all
        # smaller primes have already been divided out).
        while n % i != 0:
            i += 1
        ans = i
        # Strip every power of this prime out of n.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 57
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (the dump bound this to a throwaway name `A__`).
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build (old_key, new_key) pairs mapping fairseq TrOCR encoder weights onto
    the HF ViT encoder layout.

    Args:
        encoder_config: config exposing ``num_hidden_layers`` for the ViT encoder.
        decoder_config: accepted for call-site compatibility; unused here.

    Returns:
        List of (source_key, destination_key) tuples.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )
    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split each fused fairseq qkv projection into separate HF q/k/v weights.

    Mutates ``state_dict`` in place: pops ``encoder.deit.blocks.{i}.attn.qkv.weight``
    and writes the three ``hidden_size``-row slices to the HF attention keys.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        # Rows [0, H) are the query projection.
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        # Rows [H, 2H) are the key projection.
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        # Final H rows are the value projection.
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Download a demo image matching the checkpoint's domain.

    Handwritten checkpoints get an IAM word crop; printed/stage1 checkpoints get
    a SROIE receipt. Raises ValueError for unrecognized URLs (the original code
    would fall through and crash with an unbound variable).
    """
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    else:
        raise ValueError("Expected 'handwritten', 'printed' or 'stage1' in checkpoint URL")
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert a fairseq TrOCR checkpoint into a HF VisionEncoderDecoderModel.

    Downloads the checkpoint, renames/splits weights to the HF layout, verifies
    logits on a demo image, and saves model + processor to
    ``pytorch_dump_folder_path``.

    NOTE(review): the obfuscated dump destroyed all assignment targets; the
    config attribute names below are restored from the upstream transformers
    conversion script — confirm against it before shipping.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A__ : Dict = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 103
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase:
    """Helper that exercises the common `PretrainedConfig` contract for one config class.

    Upstream this helper is `ConfigTester`; the obfuscated class name is kept
    so any existing references continue to resolve. The dump also destroyed the
    base class (undefined name) — upstream the base is plain ``object``.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        # `parent` is the unittest.TestCase driving the assertions.
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        # Remaining kwargs are the constructor arguments for the config under test.
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Config exposes the common properties as getters, setters and init kwargs."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """Round-trip: every init kwarg must survive `to_json_string()`."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Round-trip through an on-disk JSON file."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip through save_pretrained/from_pretrained."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Round-trip with the config stored in a repo subfolder."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """`num_labels` must resize id2label/label2id both at init and on assignment."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every key in `config_common_kwargs` must be honored by the constructor."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # NOTE(review): obfuscated source said `torch.floataa`; in these
                    # dumps `aa` stands for digits, upstream this is torch.float16.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class lowercase_(PretrainedConfig):
    """Configuration class for XLM models (upstream ``XLMConfig``).

    The dump destroyed the class attribute names and every constructor
    parameter name (all collapsed to ``lowercase__``, a SyntaxError); they are
    restored here from the upstream XLM config, matching the default values and
    the attribute-assignment order preserved in the dump.
    """

    # Read by PretrainedConfig machinery.
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        # Backward compatibility: old checkpoints stored the vocab size as n_words.
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class lowercase_(OnnxConfig):
    """ONNX export configuration for XLM (upstream ``XLMOnnxConfig``).

    NOTE(review): this class reuses the same obfuscated name as the config
    class above and shadows it at module level; upstream they had distinct names.
    """

    @property
    def inputs(self):
        """Dynamic-axis spec for the exported graph's inputs."""
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 104
|
"""simple docstring"""
# Pinned dependency table: pip package name -> pip version specifier.
# Mirrors the auto-generated dependency_versions_table pattern; the variable
# name was obfuscated in this dump (presumably `deps` upstream — TODO confirm).
A : "dict[str, str]" = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the ``accelerate`` command-line tool.

    Registers all subcommand parsers, parses argv, and dispatches to the
    selected subcommand's ``func``. Prints help and exits with status 1 when
    no subcommand is given.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 105
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase(PreTrainedTokenizer):
    """Byte-level tokenizer (upstream ``ByT5Tokenizer``): one token per UTF-8 byte.

    The dump collapsed every parameter and method name (duplicate ``__a``
    parameters are a SyntaxError); names are restored from the upstream ByT5
    tokenizer, which matches the preserved method bodies line for line.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        # Sentinel (<extra_id_*>) tokens occupy the top of the vocabulary.
        # NOTE(review): assumes additional_special_tokens is non-None here
        # (guaranteed when extra_ids > 0, the default).
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        """256 byte tokens + fixed specials + sentinel tokens."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens (eos appended per sequence)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        """Append eos unless the sequence already ends with it (then warn)."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """ByT5 does not use token type ids: always all zeros."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Format: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        """One token per UTF-8 byte (fixes the dump's ``chr(__a)``: the loop
        variable is the byte value, so ``chr(i)`` is the correct form)."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Special tokens keep their table ids; single chars are byte + offset."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Reassemble byte tokens into a UTF-8 string (invalid bytes dropped)."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                # NOTE(review): indexing special_tokens_decoder here looks odd but
                # matches the preserved source; confirm against upstream ByT5.
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # Byte-level tokenizer has no vocabulary file to write.
        return ()
| 57
| 0
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__UpperCamelCase : Optional[int] = {
'''n_samples''': 6_4,
'''horizon''': 3_2,
'''num_inference_steps''': 2_0,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = '''hopper-medium-v2'''
__UpperCamelCase : Optional[int] = gym.make(env_name)
__UpperCamelCase : List[Any] = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
__UpperCamelCase : List[Any] = env.reset()
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : Any = 0
__UpperCamelCase : Any = 1_0_0_0
__UpperCamelCase : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__UpperCamelCase : Optional[int] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] = env.step(denorm_actions)
__UpperCamelCase : Optional[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__UpperCamelCase : str = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 106
|
"""simple docstring"""
import numpy
# List of input, output pairs
A : Any = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
A : Union[str, Any] = [2, 4, 1, 5]
A : int = len(train_data)
A : Dict = 0.009
def _error(example_no, data_set="train"):
    """Return (hypothesis - actual output) for one example of the given data set.

    The original def repeated a parameter name (SyntaxError) and had a scrambled
    name; it is renamed to `_error`, the name its in-file callers use.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis: bias + sum(feature_i * weight_{i+1}).

    Iterates over the weights (parameter_vector[1:]); parameter_vector[0] is the
    bias term. Renamed from the scrambled `_lowerCamelCase` to the name its
    in-file callers use.
    """
    hyp_val = 0
    # One weight per feature; parameter_vector has len(features) + 1 entries.
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Return the labelled output for example `example_no` of "train" or "test".

    Returns None for an unrecognized data-set name. The original def repeated a
    parameter name (SyntaxError); renamed per its in-file call sites.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value for example `example_no` of "train" or "test".

    Returns None for an unrecognized data-set name. The original def repeated a
    parameter name (SyntaxError); renamed per its in-file call sites.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum the per-example error terms over the first `end` training examples.

    index == -1 accumulates the plain errors (bias derivative); otherwise each
    error is weighted by feature `index` of the example. The original def
    repeated a parameter name (SyntaxError); renamed per its in-file call site.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Return the cost derivative for parameter `index`: mean of the summed error terms.

    Fixes the scrambled call that passed `index` twice — the second argument is
    the number of training examples `m` (the summation's `end`).
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Batch gradient descent on the global `parameter_vector` until convergence.

    Convergence is declared when consecutive parameter vectors are numpy-allclose
    within the tolerances below. Restores the locals (`temp_parameter_vector`,
    tolerance values) that the scrambled original assigned to throwaway names.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 == -1 selects the bias derivative for the first parameter.
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. hypothesis outputs for every test-set example.

    The scrambled original iterated over an undefined name; the loop runs over
    `test_data`, matching the "test" lookups in the body.
    """
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 57
| 0
|
import warnings

# Deprecation shim: importing this module only re-exports the pipeline and warns.
# Fixed the scrambled import name `StableDiffusionImgaImgPipeline` — the warning
# text below names the real class, StableDiffusionImg2ImgPipeline.
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionImg2ImgPipeline` instead.'
)
| 107
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # Tests for ChineseCLIPProcessor: a BertTokenizer(/Fast) paired with a
    # ChineseCLIPImageProcessor, round-tripped through save_pretrained /
    # from_pretrained in a temporary directory.
    # NOTE(review): identifier scrambling — every method below is named
    # `snake_case` (later defs shadow earlier ones) and many locals are bound
    # to `__lowerCAmelCase` but read back under other names (self.tmpdirname,
    # vocab_tokens, processor, ...). Code left byte-identical; flagged for repair.

    def snake_case ( self ):
        # setUp: write a tiny BERT vocab file and an image-processor config.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )

    def snake_case ( self , **__a ):
        # Slow tokenizer loaded from the temp dir.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        # Fast (Rust) tokenizer loaded from the temp dir.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        # Image processor loaded from the temp dir.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self ):
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self ):
        # One random uint8 image, moved to channels-last and wrapped as PIL.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self ):
        # save_pretrained / from_pretrained round-trip for slow and fast processors.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )

    def snake_case ( self ):
        # from_pretrained with overridden tokenizer / image-processor kwargs.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )

    def snake_case ( self ):
        # processor(images=...) must match the bare image processor output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def snake_case ( self ):
        # processor(text=...) must match the bare tokenizer output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self ):
        # Joint text+image call returns all expected keys; empty call raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()

    def snake_case ( self ):
        # batch_decode is forwarded straight to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )

    def snake_case ( self ):
        # Output keys of a joint call match the processor's model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class SCREAMING_SNAKE_CASE__(lowercase):
    """Configuration class for the Salesforce CTRL model.

    Fixes from the scrambled original: the three class attributes were all
    named `a` (restored to the PretrainedConfig contract names), the __init__
    signature repeated one parameter name (SyntaxError), and the arguments
    were bound to a throwaway local instead of being stored on the instance.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Canonical attribute names mapped onto CTRL's historical config keys.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Store the CTRL hyper-parameters, then defer to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 108
|
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2, row-major.

    A negative size is made positive; zero falls back to 4. Renamed from the
    scrambled `_lowerCamelCase` to the name the __main__ block calls, and the
    undefined `row_size` local is restored.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_aa(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90° counterclockwise: transpose, then reverse row order.

    Renamed from the scrambled `_lowerCamelCase` to the name the __main__ block
    calls for the 90° case.
    """
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_aaa(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180°: reverse both the row order and each row.

    Renamed from the scrambled `_lowerCamelCase` to the name the __main__ block
    calls for the 180° case.
    """
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    '''simple docstring'''
    # Rotate the matrix 270° counterclockwise: reverse each row, then transpose.
    # NOTE(review): scrambled identifiers — this def's intended name collides
    # with the 180° rotation's call-site name (`rotate_aaa`), and the helpers
    # `reverse_column` / `transpose` are defined under scrambled names in this
    # file; confirm the intended naming before use.
    return reverse_column(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of `matrix` (rows become columns).

    Fixes the scrambled original, which built each row as a copy of the whole
    matrix and returned an undefined name. Renamed per its in-file call sites.
    """
    matrix = [list(row) for row in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with the order of its rows reversed.

    Renamed from the scrambled `_lowerCamelCase` per its in-file call sites and
    with the undefined `matrix` local restored.
    """
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed (columns mirrored).

    Renamed from the scrambled `_lowerCamelCase` per its in-file call sites and
    with the undefined `matrix` local restored.
    """
    matrix = [row[::-1] for row in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix, one row per line with space-separated values.

    Fixes the scrambled original, which iterated an undefined name and printed
    the whole matrix on every line instead of the current row.
    """
    for row in matrix:
        print(*row)
if __name__ == "__main__":
A : Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
A : List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
A : str = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 57
| 0
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE__(nn.Module):
    """Flax block: 2x nearest-neighbour upsample followed by a 3x3 same-padding conv.

    Fixes from the scrambled original: the two dataclass fields shared one name,
    the dtype default referenced nonexistent `jnp.floataa` (float32 assumed),
    and the conv was bound to a local instead of `self.conv`, which `__call__`
    reads — requiring the flax `setup` hook name.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour resize doubles the spatial dims; the conv refines.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="""nearest""",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class SCREAMING_SNAKE_CASE__(nn.Module):
    """Flax block: 3x3 stride-2 convolutional downsampling.

    Fixes from the scrambled original: duplicate field names, nonexistent
    `jnp.floataa` dtype (float32 assumed), and the conv bound to a local
    instead of `self.conv` inside the flax `setup` hook.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Strided conv halves the spatial resolution.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class SCREAMING_SNAKE_CASE__(nn.Module):
    """Flax ResNet block with time-embedding injection and optional 1x1 shortcut.

    Fixes from the scrambled original: all dataclass fields shared one name,
    submodules were bound to locals instead of self attributes (the `setup`
    hook is required for `__call__` to see them), `__call__` repeated a
    parameter name (SyntaxError), and the two norm/conv pairs had been
    collapsed onto single names.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32  # NOTE(review): original read `jnp.floataa`

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # Projects the (swished) time embedding into the channel dimension.
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # 1x1 shortcut when channel counts differ (or when explicitly requested).
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="""VALID""",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Broadcast the time embedding over the spatial dims and add it.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 109
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # Pipeline tests for text2text-generation on tiny random T5 models (PT + TF).
    # NOTE(review): identifier scrambling — both class attributes share one name,
    # every method is `snake_case` (later defs shadow earlier ones), and the two
    # parameterized methods repeat the parameter `__a`, which is a SyntaxError.
    # Code left byte-identical; flagged for repair.
    __UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def snake_case ( self , __a , __a , __a ):
        # Build the pipeline plus example prompts for the shared test runner.
        __lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
        return generator, ["Something to write", "Something else"]

    def snake_case ( self , __a , __a ):
        # Generic shape/typing checks for single, multi-sequence and batched calls.
        __lowerCAmelCase = generator("Something there" )
        self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        __lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        __lowerCAmelCase = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        with self.assertRaises(__a ):
            generator(4 )

    @require_torch
    def snake_case ( self ):
        # PyTorch: greedy decode, beam search, and tensor-returning variants.
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
        __lowerCAmelCase = 3
        __lowerCAmelCase = generator(
            "Something there" , num_return_sequences=__a , num_beams=__a , )
        __lowerCAmelCase = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(__a , __a )
        __lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
        self.assertEqual(
            __a , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        __lowerCAmelCase = generator.model.config.eos_token_id
        __lowerCAmelCase = "<pad>"
        __lowerCAmelCase = generator(
            ["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
        self.assertEqual(
            __a , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )

    @require_tf
    def snake_case ( self ):
        # TensorFlow: greedy-decode smoke test on the tiny T5.
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 0
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _a(UpperCamelCase__):
    """Dataset input stream that materializes a dataset from a Python generator.

    Fixes from the scrambled original: __init__ repeated one parameter name for
    all arguments (SyntaxError) — the distinct names are restored from the
    builder's keyword arguments — and the read method passed undefined names
    as keyword values.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that knows how to run `generator` with `gen_kwargs`.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def lowerCamelCase_(self):
        """Build and return the 'train' split, streaming or map-style."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 110
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase(pl.LightningModule):
    """Minimal Lightning wrapper used only to load the QA checkpoint's weights.

    Fixes the scrambled original, which bound the backbone, label count, and QA
    head to throwaway locals; they must live on `self` so the checkpoint's
    `state_dict` keys (model.*, qa_outputs.*) line up on load.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        # Start/end logits head matching the checkpoint layout.
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def snake_case(self):
        # Forward/training logic is irrelevant for weight conversion.
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Transfer weights from a PyTorch-Lightning QA checkpoint into a
    LongformerForQuestionAnswering model and save it with save_pretrained.

    The scrambled original repeated one parameter name three times
    (SyntaxError); the def is renamed to match its call site in __main__.
    """
    # load the Longformer backbone and wrap it so state_dict keys line up
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 57
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase( videos ) -> List[Any]:
    """Coerce `videos` into batched form: a list of videos, each a list of frames.

    Accepts a batch of videos, a single video (list of frames), or a single
    frame, wrapping as needed. The scrambled original's body read an undefined
    name `videos` while the parameter was `UpperCamelCase_`; the parameter is
    restored to the name the body uses.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already batched.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # Single video: wrap in a batch of one.
        return [videos]
    elif is_valid_image(videos):
        # Single frame: wrap twice.
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
    # Video image processor: per-frame resize -> center-crop -> rescale
    # (optionally offset so values land in [-1, 1]) -> normalize, applied to
    # every frame of every video in the batch.
    # NOTE(review): identifier scrambling — in __init__ and several helpers the
    # locals are all bound to `UpperCamelCase`, so the attributes read later
    # (self.do_resize, self.size, ...) are never actually set, the keyword
    # values passed as `__a` are undefined, and the parameter lists repeat
    # `lowerCamelCase_` (a SyntaxError). Code left byte-identical; flagged
    # for repair.
    __lowerCAmelCase = ["""pixel_values"""]

    def __init__( self : Optional[Any] , lowerCamelCase_ : Tuple = True , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[Any] = PILImageResampling.BILINEAR , lowerCamelCase_ : List[Any] = True , lowerCamelCase_ : str = None , lowerCamelCase_ : Optional[int] = True , lowerCamelCase_ : Any = 1 / 255 , lowerCamelCase_ : int = True , lowerCamelCase_ : Optional[int] = True , lowerCamelCase_ : List[str] = None , lowerCamelCase_ : List[Any] = None , **lowerCamelCase_ : Any , ):
        """simple docstring"""
        # Record defaults; size defaults to shortest_edge=256, crop to 224x224.
        super().__init__(**__a )
        UpperCamelCase = size if size is not None else {"""shortest_edge""": 256}
        UpperCamelCase = get_size_dict(__a , default_to_square=__a )
        UpperCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        UpperCamelCase = get_size_dict(__a , param_name="""crop_size""" )
        UpperCamelCase = do_resize
        UpperCamelCase = size
        UpperCamelCase = do_center_crop
        UpperCamelCase = crop_size
        UpperCamelCase = resample
        UpperCamelCase = do_rescale
        UpperCamelCase = rescale_factor
        UpperCamelCase = offset
        UpperCamelCase = do_normalize
        UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict = PILImageResampling.BILINEAR , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[int] , ):
        """simple docstring"""
        # Resize by shortest edge (aspect preserved) or to an exact height/width.
        UpperCamelCase = get_size_dict(__a , default_to_square=__a )
        if "shortest_edge" in size:
            UpperCamelCase = get_resize_output_image_size(__a , size["""shortest_edge"""] , default_to_square=__a )
        elif "height" in size and "width" in size:
            UpperCamelCase = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(__a , size=__a , resample=__a , data_format=__a , **__a )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Any , ):
        """simple docstring"""
        # Center-crop to an exact height/width.
        UpperCamelCase = get_size_dict(__a )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] = True , lowerCamelCase_ : Any = None , **lowerCamelCase_ : int , ):
        """simple docstring"""
        # Rescale pixel values; with offset=True, shift so the range is centred.
        UpperCamelCase = image.astype(np.floataa )
        if offset:
            UpperCamelCase = image - (scale / 2)
        return rescale(__a , scale=__a , data_format=__a , **__a )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] = None , **lowerCamelCase_ : Dict , ):
        """simple docstring"""
        # Channel-wise normalization with the given mean/std.
        return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )

    def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Dict = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : List[str] = None , lowerCamelCase_ : Union[str, Any] = None , lowerCamelCase_ : Union[str, Any] = None , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : Union[str, Any] = ChannelDimension.FIRST , ):
        """simple docstring"""
        # Per-image pipeline: validate flag/parameter pairs, then apply each
        # enabled transform in order and emit channels-first by default.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        if offset and not do_rescale:
            raise ValueError("""For offset, do_rescale must also be set to True.""" )
        # All transformations expect numpy arrays.
        UpperCamelCase = to_numpy_array(__a )
        if do_resize:
            UpperCamelCase = self.resize(image=__a , size=__a , resample=__a )
        if do_center_crop:
            UpperCamelCase = self.center_crop(__a , size=__a )
        if do_rescale:
            UpperCamelCase = self.rescale(image=__a , scale=__a , offset=__a )
        if do_normalize:
            UpperCamelCase = self.normalize(image=__a , mean=__a , std=__a )
        UpperCamelCase = to_channel_dimension_format(__a , __a )
        return image

    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : int = None , lowerCamelCase_ : List[str] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Union[str, Any] = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Any = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Optional[int] = ChannelDimension.FIRST , **lowerCamelCase_ : List[Any] , ):
        """simple docstring"""
        # Public preprocess: resolve per-call overrides against the stored
        # defaults, validate inputs, batch the videos, and run the per-image
        # pipeline on every frame of every video.
        UpperCamelCase = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase = resample if resample is not None else self.resample
        UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase = offset if offset is not None else self.offset
        UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase = image_mean if image_mean is not None else self.image_mean
        UpperCamelCase = image_std if image_std is not None else self.image_std
        UpperCamelCase = size if size is not None else self.size
        UpperCamelCase = get_size_dict(__a , default_to_square=__a )
        UpperCamelCase = crop_size if crop_size is not None else self.crop_size
        UpperCamelCase = get_size_dict(__a , param_name="""crop_size""" )
        if not valid_images(__a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        UpperCamelCase = make_batched(__a )
        UpperCamelCase = [
            [
                self._preprocess_image(
                    image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
                for img in video
            ]
            for video in videos
        ]
        UpperCamelCase = {"""pixel_values""": videos}
        return BatchFeature(data=__a , tensor_type=__a )
| 343
|
"""simple docstring"""
def _lowerCamelCase ( input_list ):
    """Sort ``input_list`` in place with odd-even transposition sort and return it.

    Repeatedly sweeps all (even, odd) then all (odd, even) adjacent index
    pairs, swapping out-of-order neighbours, until a full pass makes no swap.

    Args:
        input_list: Mutable sequence of comparable items; mutated in place.

    Returns:
        The same list, now sorted ascending.
    """
    # Original mangled body read undefined names (`is_sorted`, `input_list`);
    # restored the conventional termination flag.
    is_sorted = False
    while not is_sorted:  # keep sweeping until a pass makes no swap
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # all even-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # all odd-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    # One line of whitespace-separated integers from stdin.
    input_list = [int(x) for x in input().split()]
    # `_lowerCamelCase` above is the odd-even transposition sort.
    # (Original called undefined `odd_even_sort` and read undefined `input_list`.)
    sorted_list = _lowerCamelCase(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCamelCase = logging.get_logger(__name__)
# NOTE(review): this rebinding clobbers the logger above — both names were
# machine-mangled to `lowerCamelCase`; the dict is presumably the pretrained
# config archive map. Verify and restore the intended distinct names.
lowerCamelCase = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class A ( lowerCAmelCase__ ):
    """Configuration class for the MRA model.

    Stores the hyper-parameters read by the MRA architecture; unspecified
    arguments default to the `uw-madison/mra-base-512-4` configuration.
    """

    # NOTE(review): attribute name is machine-mangled; upstream this is
    # `model_type = "mra"` — confirm before relying on it.
    UpperCamelCase__ : int = """mra"""

    def __init__(
        self ,
        vocab_size=5_0265 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=1 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-5 ,
        position_embedding_type="absolute" ,
        block_per_row=4 ,
        approx_mode="full" ,
        initial_prior_first_n_blocks=0 ,
        initial_prior_diagonal_n_blocks=0 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        """Initialize the config and forward the special-token ids to the base class.

        The original mangled signature repeated a single parameter name twenty
        times (a SyntaxError); names were restored from the attribute
        assignments in the body below.
        """
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 199
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer.

    Text inputs are routed to the tokenizer, image inputs to the image
    processor; when both are given the pixel values are merged into the
    text encoding.
    """

    # NOTE(review): the three class attributes and the decode helpers were all
    # clobbered to single mangled names (later definitions silently replaced
    # earlier ones); the ProcessorMixin contract names are restored here.
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        # Accept the deprecated `feature_extractor` as a fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Encode text and/or images; at least one of the two must be given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Merge the image features into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 57
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
# Fairseq-offset ids of the English / Romanian NLLB language-code tokens.
# (Originally both were bound to the same mangled name — the second clobbered
# the first — while the tests below read EN_CODE / RO_CODE.)
EN_CODE = 2_5_6_0_4_7
RO_CODE = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( lowerCAmelCase__ , unittest.TestCase ):
    """Common tokenizer-test suite run against the slow and fast NLLB tokenizers."""

    # NOTE(review): all five class attributes share one machine-mangled name,
    # so only the final assignment ({}) survives at class creation; the
    # intended distinct names (tokenizer_class, rust_tokenizer_class, ...)
    # must be restored for the mixin to work.
    lowerCAmelCase__ = NllbTokenizer
    lowerCAmelCase__ = NllbTokenizerFast
    lowerCAmelCase__ = True
    lowerCAmelCase__ = True
    lowerCAmelCase__ = {}

    def lowercase_ ( self : int ):
        """Create a tiny SentencePiece-backed NLLB tokenizer and save it to the test tmpdir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): `__a` and the bare `tokenizer` below are unresolved
        # machine-mangled names in this copy of the file.
        UpperCAmelCase__ : Dict = NllbTokenizer(__a , keep_accents=__a )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowercase_ ( self : List[str] ):
        """Check tokenization and token<->id conversion on fixed sample sentences."""
        UpperCAmelCase__ : List[str] = NllbTokenizer(__a , keep_accents=__a )
        UpperCAmelCase__ : Tuple = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        UpperCAmelCase__ : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(
            __a , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        # Out-of-vocabulary pieces come back as '<unk>' on the reverse mapping.
        UpperCAmelCase__ : Dict = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    def lowercase_ ( self : Any ):
        """Round-trip save_pretrained/from_pretrained for slow vs fast tokenizers, in all legacy formats."""
        UpperCAmelCase__ : List[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
                UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a )
                UpperCAmelCase__ : int = tempfile.mkdtemp()
                UpperCAmelCase__ : Optional[int] = tokenizer_r.save_pretrained(__a )
                UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(__a )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                UpperCAmelCase__ : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__a , __a )
                # Checks everything loads correctly in the same way
                UpperCAmelCase__ : Optional[int] = tokenizer_r.from_pretrained(__a )
                UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(__a )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__a , __a ) )
                shutil.rmtree(__a )
                # Save tokenizer rust, legacy_format=True
                UpperCAmelCase__ : List[Any] = tempfile.mkdtemp()
                UpperCAmelCase__ : int = tokenizer_r.save_pretrained(__a , legacy_format=__a )
                UpperCAmelCase__ : Dict = tokenizer_p.save_pretrained(__a )
                # Checks it save with the same files
                self.assertSequenceEqual(__a , __a )
                # Checks everything loads correctly in the same way
                UpperCAmelCase__ : Dict = tokenizer_r.from_pretrained(__a )
                UpperCAmelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(__a )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__a , __a ) )
                shutil.rmtree(__a )
                # Save tokenizer rust, legacy_format=False
                UpperCAmelCase__ : Tuple = tempfile.mkdtemp()
                UpperCAmelCase__ : Tuple = tokenizer_r.save_pretrained(__a , legacy_format=__a )
                UpperCAmelCase__ : Any = tokenizer_p.save_pretrained(__a )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                UpperCAmelCase__ : int = tokenizer_r.from_pretrained(__a )
                UpperCAmelCase__ : Any = tokenizer_p.from_pretrained(__a )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__a , __a ) )
                shutil.rmtree(__a )

    @require_torch
    def lowercase_ ( self : Optional[int] ):
        """Exercise prepare_seq2seq_batch truncation/max-length behaviour (skips if unimplemented)."""
        if not self.test_seqaseq:
            return
        UpperCAmelCase__ : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Longer text that will definitely require truncation.
                UpperCAmelCase__ : Optional[int] = [
                    ''' UN Chief Says There Is No Military Solution in Syria''',
                    ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
                    ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
                    ''' will only worsen the violence and misery for millions of people.''',
                ]
                UpperCAmelCase__ : Dict = [
                    '''Şeful ONU declară că nu există o soluţie militară în Siria''',
                    '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
                    ''' Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi'''
                    ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
                ]
                try:
                    UpperCAmelCase__ : Tuple = tokenizer.prepare_seqaseq_batch(
                        src_texts=__a , tgt_texts=__a , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                UpperCAmelCase__ : Dict = tokenizer.prepare_seqaseq_batch(
                    __a , tgt_texts=__a , max_length=3 , return_tensors='''pt''' )
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 3 )
                UpperCAmelCase__ : int = tokenizer.prepare_seqaseq_batch(
                    src_texts=__a , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn('''decoder_input_ids''' , __a )

    @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
    def lowercase_ ( self : Dict ):
        """Skipped: building a BPE from SentencePiece is too slow for CI."""
        pass

    def lowercase_ ( self : Optional[Any] ):
        """Verify additional special tokens survive encoding on fast (and, if enabled, slow) tokenizers."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase__ : Optional[int] = [AddedToken('''<special>''' , lstrip=__a )]
                UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    __a , additional_special_tokens=__a , **__a )
                UpperCAmelCase__ : int = tokenizer_r.encode('''Hey this is a <special> token''' )
                UpperCAmelCase__ : List[Any] = tokenizer_r.encode('''<special>''' , add_special_tokens=__a )[0]
                self.assertTrue(special_token_id in r_output )
                if self.test_slow_tokenizer:
                    UpperCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
                        __a , additional_special_tokens=__a , **__a , )
                    UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(
                        __a , additional_special_tokens=__a , **__a )
                    UpperCAmelCase__ : Optional[Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
                    UpperCAmelCase__ : int = tokenizer_cr.encode('''Hey this is a <special> token''' )
                    self.assertEqual(__a , __a )
                    self.assertEqual(__a , __a )
                    self.assertTrue(special_token_id in p_output )
                    self.assertTrue(special_token_id in cr_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration tests against the real ``facebook/nllb-200-distilled-600M`` tokenizer."""

    # NOTE(review): the four class attributes below share one machine-mangled
    # name, so only the last (the expected-token-id list) survives; the
    # checkpoint name and the src/tgt sample texts read later via
    # `cls.checkpoint_name` / `self.src_text` / `self.tgt_text` are therefore
    # unresolved in this copy and need their original names restored.
    lowerCAmelCase__ = """facebook/nllb-200-distilled-600M"""
    lowerCAmelCase__ = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    lowerCAmelCase__ = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    lowerCAmelCase__ = [
        2_5_6_0_4_7,
        1_6_2_9_7,
        1_3_4_4_0_8,
        8_1_6_5,
        2_4_8_0_6_6,
        1_4_7_3_4,
        9_5_0,
        1_1_3_5,
        1_0_5_7_2_1,
        3_5_7_3,
        8_3,
        2_7_3_5_2,
        1_0_8,
        4_9_4_8_6,
        2,
    ]

    @classmethod
    def lowercase_ ( cls : str ):
        """Load the pretrained tokenizer once for the whole test class (setUpClass-style)."""
        UpperCAmelCase__ : Dict = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' )
        UpperCAmelCase__ : Dict = 1
        return cls

    def lowercase_ ( self : Optional[int] ):
        """Spot-check a few language-code token ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 256_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 256_002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 256_057 )

    def lowercase_ ( self : Optional[int] ):
        """Encoding the first source sentence must match the expected id list."""
        UpperCAmelCase__ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __a )

    def lowercase_ ( self : Dict ):
        """Decoding must strip special tokens and the leading language code."""
        self.assertIn(__a , self.tokenizer.all_special_ids )
        # fmt: off
        UpperCAmelCase__ : Optional[int] = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
        # fmt: on
        UpperCAmelCase__ : Union[str, Any] = self.tokenizer.decode(__a , skip_special_tokens=__a )
        UpperCAmelCase__ : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
        self.assertEqual(__a , __a )
        self.assertNotIn(self.tokenizer.eos_token , __a )

    def lowercase_ ( self : List[str] ):
        """Truncation must keep the language code first and EOS last."""
        UpperCAmelCase__ : str = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , __a )
        UpperCAmelCase__ : List[str] = 10
        UpperCAmelCase__ : List[Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(ids[0] , __a )
        self.assertEqual(len(__a ) , __a )

    def lowercase_ ( self : str ):
        """Unknown language codes map to the mask id / unk fallback."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [256_203, 3] )

    def lowercase_ ( self : str ):
        """Vocabulary mapping must survive a save/reload round trip."""
        UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
        UpperCAmelCase__ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__a )
        UpperCAmelCase__ : Union[str, Any] = NllbTokenizer.from_pretrained(__a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )

    @require_torch
    def lowercase_ ( self : str ):
        """Full src+tgt batch: shapes, shifted labels and prefix/suffix tokens."""
        UpperCAmelCase__ : Optional[Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        UpperCAmelCase__ : List[Any] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
        self.assertIsInstance(__a , __a )
        self.assertEqual((2, 15) , batch.input_ids.shape )
        self.assertEqual((2, 15) , batch.attention_mask.shape )
        UpperCAmelCase__ : Union[str, Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __a )
        self.assertEqual(__a , batch.decoder_input_ids[0, 0] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def lowercase_ ( self : Union[str, Any] ):
        """Independent max_length for source vs target encodings."""
        UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors='''pt''' )
        UpperCAmelCase__ : Dict = self.tokenizer(
            text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors='''pt''' )
        UpperCAmelCase__ : Union[str, Any] = targets['''input_ids''']
        UpperCAmelCase__ : Optional[int] = shift_tokens_right(
            __a , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def lowercase_ ( self : List[str] ):
        """_build_translation_inputs must emit ids, mask and forced BOS."""
        UpperCAmelCase__ : str = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
        self.assertEqual(
            nested_simplify(__a ) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[256_047, 70, 7_356, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 256_057,
            } , )

    @require_torch
    def lowercase_ ( self : Optional[int] ):
        """legacy_behaviour toggles whether the language code is suffix or prefix."""
        UpperCAmelCase__ : List[Any] = True
        UpperCAmelCase__ : Union[str, Any] = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
        self.assertEqual(
            inputs.input_ids , [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] )
        UpperCAmelCase__ : Union[str, Any] = False
        UpperCAmelCase__ : Any = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
        self.assertEqual(
            inputs.input_ids , [256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
| 181
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """Empty class — presumably a minimal fixture exercising the
    ``@require_onnxruntime`` requirement decorator; confirm intent upstream."""

    pass
| 57
| 0
|
"""simple docstring"""
from __future__ import annotations
def _lowercase ( resistors ) -> float:
    """Return the equivalent resistance of ``resistors`` wired in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    Args:
        resistors: Iterable of resistor values (ohms); all must be > 0.

    Returns:
        The combined parallel resistance.

    Raises:
        ValueError: If any resistor is zero or negative (the original body
            read an undefined ``resistors`` name and raised the wrong object).
    """
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
    return 1 / first_sum
def _lowercase ( resistors ) -> float:
    """Return the equivalent resistance of ``resistors`` wired in series.

    Req = R1 + R2 + ... + Rn

    Args:
        resistors: Iterable of resistor values (ohms); none may be negative.

    Returns:
        The combined series resistance.

    Raises:
        ValueError: If any resistor is negative (the original body read an
            undefined ``resistors`` name and raised the wrong object).
    """
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
    return sum_r
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 132
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
    """Array-backed min-heap with an external vertex->index position table.

    Used by the Prim's-algorithm function below, whose call sites
    (``heapify``, ``delete_minimum``, ``get_position``, ``bottom_to_top``)
    fix the method names restored here — the original had all six methods
    clobbered to one mangled name and duplicate parameter names.
    """

    def __init__( self ):
        # node_position[vertex] -> current index of that vertex in the heap.
        self.node_position = []

    def get_position( self , vertex ):
        """Return the heap index currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position( self , vertex , pos ):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom( self , heap , start , size , positions ):
        """Sift the element at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap both the key array and the parallel vertex array,
                # then fix up the position table for the two moved vertices.
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos
                temp_index = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp_index )
                self.top_to_bottom(heap , smallest_child , size , positions )

    def bottom_to_top( self , val , index , heap , position ):
        """Sift ``val`` up from ``index`` toward the root after a key decrease."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            # Reached the root without breaking: val becomes the new minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )

    def heapify( self , heap , positions ):
        """Build a min-heap in place from the last internal node upward."""
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum( self , heap , positions ):
        """Pop and return the vertex with the smallest key; re-heapify."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def _lowerCamelCase ( adjacency_list ):
    """Compute a minimum spanning tree with Prim's algorithm.

    Args:
        adjacency_list: Mapping/sequence where ``adjacency_list[v]`` is a list
            of ``[neighbor, weight]`` pairs; vertices are 0..n-1 and vertex 0
            is the starting point.

    Returns:
        List of ``(parent_vertex, vertex)`` tree edges.
    """
    # NOTE(review): original body referenced only mangled placeholder names;
    # locals were restored from the surviving reads (visited, nbr_tv, ...).
    # `_UpperCamelCase` is the position-tracking min-heap class defined above.
    heap = _UpperCamelCase()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read the edge list from stdin and print the MST edges.
    # (Original bound everything to the clobbered name `A` and then read
    # undefined `edges_number` / `edge` / `adjacency_list` / `prisms_algorithm`.)
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    # `_lowerCamelCase` above is the Prim's-algorithm entry point.
    print(_lowerCamelCase(adjacency_list))
| 57
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
_snake_case = "2020.9.26"
_snake_case = "xcodz-dot, cclaus, dhruvmanila"
def lowerCAmelCase__ ( x , y , z , scale , distance ):
    """Perspective-project a 3D point onto a 2D plane.

    Args:
        x, y, z: Coordinates of the point (float or int).
        scale: Scaling factor applied to the projected coordinates.
        distance: Distance of the projection plane from the observer.

    Returns:
        Tuple ``(projected_x, projected_y)``.

    Raises:
        TypeError: If any argument is not a float or an int (the original
            checked the wrong object and raised the wrong value).
    """
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def lowerCAmelCase__ ( x , y , z , axis , angle ):
    """Rotate point ``(x, y, z)`` about one coordinate axis by ``angle``.

    Args:
        x, y, z: Coordinates of the point (float or int).
        axis: One of ``'x'``, ``'y'`` or ``'z'``.
        angle: Rotation angle (see the conversion note below).

    Returns:
        Tuple ``(new_x, new_y, new_z)``.

    Raises:
        TypeError: If ``axis`` is not a str or a coordinate is not numeric.
        ValueError: If ``axis`` is not one of 'x', 'y', 'z'.
    """
    if not isinstance(axis , str ):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            """Input values except axis must either be float or int: """
            f"""{list(input_variables.values() )}"""
        )
        raise TypeError(msg )
    # NOTE(review): quirky unit conversion kept verbatim from the original —
    # it divides by 450 rather than a conventional degrees->radians factor.
    angle = (angle % 3_6_0) / 4_5_0 * 1_8_0 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `convert_to_ad` and `rotate` are unresolved names in this
    # copy — both functions above were mangled to the same name
    # `lowerCAmelCase__` (the second definition shadows the first); restore
    # the original function names before running this demo.
    print(F'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
    print(F'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
| 294
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
# (old prefix, new prefix) pairs applied when renaming checkpoint keys.
# Both lists below were bound to the clobbered mangled name `A` while the
# conversion code reads `rename_keys_prefix` / `ACCEPTABLE_CHECKPOINTS`;
# the names those reads require are restored here.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
# Checkpoint filenames this conversion script knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def _lowerCamelCase ( _UpperCamelCase ):
    """Load a torch checkpoint from path ``_UpperCamelCase`` onto CPU.

    Returns:
        The deserialized state dict. (Original assigned the load result to a
        mangled name but returned the undefined name ``sd``.)
    """
    sd = torch.load(_UpperCamelCase , map_location="cpu" )
    return sd
def _lowerCamelCase ( d , config , rename_keys_prefix=rename_keys_prefix ):
    """Build a transformers-style VisualBERT state dict from an original one.

    Drops detector weights, rewrites key prefixes via ``rename_keys_prefix``,
    and injects buffers the transformers implementation expects.

    Args:
        d: Original state dict.
        config: VisualBertConfig (only ``max_position_embeddings`` is read).
        rename_keys_prefix: (old, new) prefix pairs; defaults to the
            module-level table.

    Returns:
        ``OrderedDict`` with the converted keys/tensors.
    """
    # NOTE(review): the original signature repeated one mangled parameter name
    # three times (a SyntaxError) and the two keyed assignments below had lost
    # their keys; both keys were reconstructed from the upstream conversion
    # script — verify against it.
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def _lowerCamelCase ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert an original VisualBERT ``.th`` checkpoint to transformers format.

    Infers the model flavour (pretraining / vqa / nlvr / multichoice) and its
    visual-embedding size from the checkpoint filename, remaps the state dict
    and saves the resulting model.

    Args:
        checkpoint_path: Path to the original checkpoint; its basename must be
            one of ``ACCEPTABLE_CHECKPOINTS``.
        pytorch_dump_folder_path: Output directory (created if missing).
    """
    # NOTE(review): the original had duplicate parameter names (a SyntaxError)
    # and every local clobbered to one mangled name while the branches below
    # read `model_type` / `config_params`; locals restored from those reads.
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: convert one checkpoint and dump the PyTorch model.
    # (Original bound the parser and the parsed args to the clobbered name `A`
    # and called the undefined `convert_visual_bert_checkpoint`.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # `_lowerCamelCase` above is the conversion entry point.
    _lowerCamelCase(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 0
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( lowerCAmelCase__, lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ = """pixel_values"""
lowerCamelCase__ = False
lowerCamelCase__ = TimmBackboneConfig
def __init__( self , lowercase , **lowercase ):
    """Wrap a timm model as a transformers backbone.

    NOTE(review): the signature repeats the mangled name `lowercase` for both
    the positional parameter and **kwargs (a SyntaxError), and the body reads
    `config` / `pretrained` / `hidden_states` — presumably the positional
    parameter was `config`; restore the original names before use.
    """
    requires_backends(self , 'timm' )
    super().__init__(__a )
    _lowerCamelCase : List[str] = config
    # Fail fast on configurations timm cannot satisfy.
    if config.backbone is None:
        raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
    if config.backbone not in timm.list_models():
        raise ValueError(F'''backbone {config.backbone} is not supported by timm.''' )
    if hasattr(__a , 'out_features' ) and config.out_features is not None:
        raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
    _lowerCamelCase : List[str] = getattr(__a , 'use_pretrained_backbone' , __a )
    if pretrained is None:
        raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
    # We just take the final layer by default. This matches the default for the transformers models.
    _lowerCamelCase : Any = config.out_indices if getattr(__a , 'out_indices' , __a ) is not None else (-1,)
    _lowerCamelCase : List[Any] = timm.create_model(
        config.backbone , pretrained=__a , features_only=config.features_only , in_chans=config.num_channels , out_indices=__a , **__a , )
    # These are used to control the output of the model when called. If output_hidden_states is True, then
    # return_layers is modified to include all layers.
    _lowerCamelCase : List[str] = self._backbone.return_layers
    _lowerCamelCase : List[str] = {layer['module']: str(__a ) for i, layer in enumerate(self._backbone.feature_info.info )}
    super()._init_backbone(__a )
@classmethod
def A_ ( cls , lowercase , *lowercase , **lowercase ):
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
_lowerCamelCase : Dict = kwargs.pop('config' , TimmBackboneConfig() )
_lowerCamelCase : Dict = kwargs.pop('use_timm_backbone' , __a )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
_lowerCamelCase : Dict = kwargs.pop('num_channels' , config.num_channels )
_lowerCamelCase : str = kwargs.pop('features_only' , config.features_only )
_lowerCamelCase : Optional[int] = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
_lowerCamelCase : Dict = kwargs.pop('out_indices' , config.out_indices )
_lowerCamelCase : Optional[int] = TimmBackboneConfig(
backbone=__a , num_channels=__a , features_only=__a , use_pretrained_backbone=__a , out_indices=__a , )
return super()._from_config(__a , **__a )
def A_ ( self , lowercase ):
pass
def A_ ( self , lowercase , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
_lowerCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : List[str] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_lowerCamelCase : Optional[Any] = self._all_layers
_lowerCamelCase : Dict = self._backbone(__a , **__a )
_lowerCamelCase : List[str] = self._return_layers
_lowerCamelCase : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
_lowerCamelCase : Union[str, Any] = self._backbone(__a , **__a )
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Tuple = tuple(__a )
_lowerCamelCase : Union[str, Any] = tuple(__a ) if hidden_states is not None else None
if not return_dict:
_lowerCamelCase : Tuple = (feature_maps,)
if output_hidden_states:
_lowerCamelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__a , hidden_states=__a , attentions=__a )
| 96
|
"""simple docstring"""
class OverFlowError(Exception):
    """Raised when an enqueue would exceed a queue's fixed capacity.

    Restored name/base: ElementPriorityQueue below raises ``OverFlowError``,
    which the mangled original never defined.
    """
class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue.

    Restored name/base: both queue classes below raise ``UnderFlowError``,
    which the mangled original never defined.
    """
class FixedPriorityQueue:
    """
    Queue with three fixed priority levels (0 is the highest). Elements with
    the same priority are served FIFO; each level holds at most 100 elements.

    Restored from the mangled original: the class name (the demo below calls
    ``FixedPriorityQueue``), the ``enqueue``/``dequeue`` method names, and the
    ``self.queues`` attribute binding that the methods already read.
    """

    def __init__(self):
        # One FIFO list per priority level 0, 1, 2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Append `data` at `priority`.

        Raises the builtin OverflowError when the level is full and ValueError
        when `priority` is not 0, 1 or 2.
        """
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Pop the oldest element of the highest non-empty priority level.

        Raises UnderFlowError when every level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """
    Priority queue where the smallest element has the highest priority;
    holds at most 100 elements.

    Restored from the mangled original: the class name (the demo below calls
    ``ElementPriorityQueue``), the ``enqueue``/``dequeue`` method names, and
    the ``self.queue`` attribute binding the methods already read.
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append `data`; raises OverFlowError when the queue is full."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the smallest element.

        Raises UnderFlowError when the queue is empty.
        """
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """Demo driver for FixedPriorityQueue.

    Restored name (the ``__main__`` guard calls ``fixed_priority_queue``) and
    the ``print(fpq)`` placeholders. Note: 9 items are enqueued but 10
    dequeues are attempted, so the final call raises UnderFlowError on
    purpose, as in the upstream demo.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demo driver for ElementPriorityQueue.

    Restored name (the ``__main__`` guard calls ``element_priority_queue``)
    and the ``print(epq)`` placeholders. The final dequeue underflows on
    purpose (10 dequeues for 9 items), as in the upstream demo.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    # Run both priority-queue demos; each ends by underflowing deliberately.
    # NOTE(review): these call sites use the real function names, while the
    # demo definitions above carry a single mangled name — the definitions
    # need their original names restored for this to resolve.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 0
|
# NOTE(review): both constants are bound to the same mangled name, so the
# second assignment overwrites the first. Presumably these were two distinct
# module constants — the DNI error message and the mod-23 checksum letter
# table — confirm against the original file.
_snake_case = "Input must be a string of 8 numbers plus letter"
_snake_case = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowerCAmelCase_(snake_case_):
    """
    Validate a Spanish national ID (DNI): 8 digits plus a checksum letter,
    with an optional hyphen separator.

    Returns:
        bool: True when the trailing letter matches the mod-23 checksum of
        the 8-digit number.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the cleaned input is not 8 digits followed by a letter.
    """
    # Inlined because the module-level constants above are both bound to one
    # mangled name (the second assignment clobbers the first).
    error_message = "Input must be a string of 8 numbers plus letter"
    lookup_letters = "TRWAGMYFPDXBNJZSQVHLCKE"
    if not isinstance(snake_case_, str):
        raise TypeError(f"Expected string as input, found {type(snake_case_).__name__}")
    spanish_id_clean = snake_case_.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(error_message)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(error_message) from ex
    if letter.isdigit():
        raise ValueError(error_message)
    return letter == lookup_letters[number % 23]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 26
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__lowerCAmelCase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 57
| 0
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCAmelCase ( ConfigTester ):
    """ConfigTester specialization that also checks Levit-specific config
    attributes.

    The original base class and method name were mangled away; ``ConfigTester``
    (imported above) and ``create_and_test_config_common_properties`` are
    restored so the test-suite hook resolves.
    """

    def create_and_test_config_common_properties(self):
        """Instantiate the config and assert the Levit-specific attributes exist."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class UpperCAmelCase :
    '''simple docstring'''

    # NOTE(review): this looks like a mechanically name-mangled Levit model
    # tester. Every method below shares the name ``lowerCAmelCase_`` (later
    # defs shadow earlier ones), ``__init__`` repeats the parameter name
    # ``lowercase`` (a SyntaxError as written), locals are all bound to ``A_``
    # and call arguments appear as the undefined ``__a``. The intended methods
    # are presumably prepare_config_and_inputs / get_config /
    # create_and_check_model / create_and_check_for_image_classification /
    # prepare_config_and_inputs_for_common — confirm against the upstream
    # Levit test file before relying on behavior here.
    def __init__( self , lowercase , lowercase=1_3 , lowercase=6_4 , lowercase=3 , lowercase=3 , lowercase=2 , lowercase=1 , lowercase=1_6 , lowercase=[1_2_8, 2_5_6, 3_8_4] , lowercase=[4, 6, 8] , lowercase=[2, 3, 4] , lowercase=[1_6, 1_6, 1_6] , lowercase=0 , lowercase=[2, 2, 2] , lowercase=[2, 2, 2] , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=2 , ):
        """simple docstring"""
        # Store the test-harness parameters; the assignments below read real
        # argument names that the mangled signature no longer binds.
        A_ : Dict = parent
        A_ : List[Any] = batch_size
        A_ : List[str] = image_size
        A_ : Union[str, Any] = num_channels
        A_ : Tuple = kernel_size
        A_ : List[Any] = stride
        A_ : str = padding
        A_ : str = hidden_sizes
        A_ : Tuple = num_attention_heads
        A_ : Optional[int] = depths
        A_ : List[Any] = key_dim
        A_ : Tuple = drop_path_rate
        A_ : Tuple = patch_size
        A_ : Dict = attention_ratio
        A_ : Optional[int] = mlp_ratio
        A_ : str = initializer_range
        # Downsampling operations passed to LevitConfig as `down_ops`.
        A_ : int = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        A_ : Tuple = is_training
        A_ : int = use_labels
        A_ : Optional[int] = num_labels
        A_ : str = initializer_range
    # Presumably prepare_config_and_inputs: random pixel tensor + optional labels.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A_ : int = None
        if self.use_labels:
            A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
        A_ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels
    # Presumably get_config: build a LevitConfig from the stored parameters.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    # Presumably create_and_check_model: run LevitModel and check the
    # last_hidden_state shape after four stride-2 downsamplings.
    def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
        """simple docstring"""
        A_ : List[str] = LevitModel(config=__a )
        model.to(__a )
        model.eval()
        A_ : Union[str, Any] = model(__a )
        A_ : str = (self.image_size, self.image_size)
        A_ , A_ : Tuple = image_size[0], image_size[1]
        for _ in range(4 ):
            A_ : Tuple = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            A_ : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    # Presumably create_and_check_for_image_classification.
    def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
        """simple docstring"""
        A_ : str = self.num_labels
        A_ : Optional[Any] = LevitForImageClassification(__a )
        model.to(__a )
        model.eval()
        A_ : Union[str, Any] = model(__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Presumably prepare_config_and_inputs_for_common.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ : Optional[Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ : int = config_and_inputs
        A_ : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): this is a mechanically name-mangled Levit model test class
    # (presumably mixing in ModelTesterMixin and PipelineTesterMixin — the
    # base names were mangled away). All test methods share one mangled name,
    # the class attributes below all rebind one mangled name, locals are bound
    # to ``A_`` and call arguments appear as the undefined ``__a``. Confirm
    # every claim here against the upstream Levit test file.
    # Model classes exercised by the common model tests.
    lowerCamelCase_ = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    # Pipeline-task mapping consumed by the pipeline tester mixin.
    lowerCamelCase_ = (
        {
            """feature-extraction""": LevitModel,
            """image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): five distinct feature flags collapsed onto one mangled
    # name; upstream these are separate switches (pruning/torchscript/etc.).
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    # setUp: construct the model tester and a ConfigTester for LevitConfig.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ : int = LevitModelTester(self )
        A_ : str = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=3_7 )
    # test_config: run the standard config round-trip checks.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # create_and_test_config_common_properties: intentionally a no-op here.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        return
    @unittest.skip(reason='Levit does not use inputs_embeds' )
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='Levit does not support input and output embeddings' )
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='Levit does not output attentions' )
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        pass
    # Forward-signature test: the first positional argument must be pixel_values.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ : Optional[int] = model_class(__a )
            A_ : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_ : Optional[Any] = [*signature.parameters.keys()]
            A_ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __a )
    # Hidden-states test: one state per depth stage (+1), with the first
    # stage's spatial size derived from four stride-2 reductions.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        def check_hidden_states_output(lowercase , lowercase , lowercase ):
            A_ : str = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                A_ : Any = model(**self._prepare_for_class(__a , __a ) )
            A_ : Tuple = outputs.hidden_states
            A_ : Tuple = len(self.model_tester.depths ) + 1
            self.assertEqual(len(__a ) , __a )
            A_ : Optional[int] = (self.model_tester.image_size, self.model_tester.image_size)
            A_ , A_ : List[str] = image_size[0], image_size[1]
            for _ in range(4 ):
                A_ : Optional[Any] = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                A_ : int = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ : str = True
            check_hidden_states_output(__a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A_ : str = True
            check_hidden_states_output(__a , __a , __a )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        pass
    # _prepare_for_class override: the teacher-distillation head is
    # inference-only, so its labels are dropped.
    def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=False ):
        """simple docstring"""
        A_ : Tuple = super()._prepare_for_class(__a , __a , return_labels=__a )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    # test_model: delegate to the model tester.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    # test_for_image_classification: delegate to the model tester.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )
    # test_training: run one backward pass through each trainable model class.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(__a )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            A_ : Optional[int] = model_class(__a )
            model.to(__a )
            model.train()
            A_ : Dict = self._prepare_for_class(__a , __a , return_labels=__a )
            A_ : Optional[int] = model(**__a ).loss
            loss.backward()
    # test_training_gradient_checkpointing: same as above with checkpointing on.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        A_ : Union[str, Any] = False
        A_ : Optional[int] = True
        for model_class in self.all_model_classes:
            if model_class in get_values(__a ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            A_ : Any = model_class(__a )
            model.gradient_checkpointing_enable()
            model.to(__a )
            model.train()
            A_ : Any = self._prepare_for_class(__a , __a , return_labels=__a )
            A_ : Optional[int] = model(**__a ).loss
            loss.backward()
    # test_problem_types: exercise multi-label / single-label / regression
    # heads and fail if PyTorch warns about a broadcasting size mismatch.
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Optional[int] = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(__a ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
                    A_ : Dict = problem_type['title']
                    A_ : List[Any] = problem_type['num_labels']
                    A_ : Optional[Any] = model_class(__a )
                    model.to(__a )
                    model.train()
                    A_ : List[str] = self._prepare_for_class(__a , __a , return_labels=__a )
                    if problem_type["num_labels"] > 1:
                        A_ : Any = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    A_ : str = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=__a ) as warning_list:
                        A_ : Optional[int] = model(**__a ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()
    @slow
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        # Smoke-test loading the first published Levit checkpoint.
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : Any = LevitModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
def UpperCamelCase ( ):
    """Return the standard COCO cats fixture image used by the slow tests."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): a name-mangled Levit integration test. Both methods share
    # one mangled name (the second shadows the first, breaking the
    # ``self.default_image_processor`` lookup below), and call arguments
    # appear as the undefined ``__a`` — confirm against the upstream file.
    @cached_property
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        # Presumably default_image_processor: load the processor for the first
        # published Levit checkpoint.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        # End-to-end check: run the teacher-distilled classifier on the COCO
        # fixture image and compare the first logits against reference values.
        A_ : List[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            __a )
        A_ : Any = self.default_image_processor
        A_ : Optional[Any] = prepare_img()
        A_ : Union[str, Any] = image_processor(images=__a , return_tensors='pt' ).to(__a )
        # forward pass
        with torch.no_grad():
            A_ : str = model(**__a )
        # verify the logits
        A_ : Any = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __a )
        A_ : Union[str, Any] = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 140
|
"""simple docstring"""
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCAmelCase = "".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
    # Print the Project Euler 40 answer; the original called the undefined
    # name `solution`, but the function above is bound as `_lowerCamelCase`.
    print(_lowerCamelCase())
| 57
| 0
|
class OverFlowError(Exception):
    """Signals an enqueue onto a full queue.

    Name and base restored: ElementPriorityQueue below raises
    ``OverFlowError``, never defined by the mangled original.
    """
class UnderFlowError(Exception):
    """Signals a dequeue from an empty queue.

    Name and base restored: both queue classes below raise
    ``UnderFlowError``, never defined by the mangled original.
    """
class FixedPriorityQueue:
    """Three-level priority queue (level 0 served first, FIFO within a level,
    100 elements per level).

    The mangled original gave both methods one shared name and bound the
    storage to a throwaway local; the real names and ``self.queues`` are
    restored here to match the demo driver below.
    """

    def __init__(self):
        # Separate FIFO buffers for priorities 0, 1 and 2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Store `data` at `priority` (ValueError outside 0-2, builtin
        OverflowError when that level already holds 100 items)."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError('''Maximum queue size is 100''')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('''Valid priorities are 0, 1, and 2''')

    def dequeue(self):
        """Serve the first non-empty level, oldest element first; raises
        UnderFlowError when every level is empty."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('''All queues are empty''')

    def __str__(self):
        return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Priority queue ordered by value: the smallest stored element is served
    first. Capacity is 100 elements.

    Real class/method names and the ``self.queue`` binding are restored from
    the mangled original so the demo driver below resolves.
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append `data`; raises OverFlowError at capacity."""
        if len(self.queue) == 100:
            raise OverFlowError('''Maximum queue size is 100''')
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the minimum element; raises UnderFlowError when
        the queue is empty."""
        if not self.queue:
            raise UnderFlowError('''The queue is empty''')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """Demo driver for FixedPriorityQueue (name restored to match the
    ``__main__`` guard). Deliberately attempts ten dequeues of nine items, so
    the last call raises UnderFlowError, as in the upstream demo."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demo driver for ElementPriorityQueue (name restored to match the
    ``__main__`` guard). The tenth dequeue of nine items underflows on
    purpose, as in the upstream demo."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    # Exercise both demos; each intentionally ends with an underflow.
    # NOTE(review): the call sites use the real function names while the
    # definitions above are mangled — their original names must be restored
    # for these calls to resolve.
    fixed_priority_queue()
    element_priority_queue()
| 210
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared test fixtures: the BGR sample image and its grayscale version.
# The mangled original bound both to the single name `A` (clobbering the
# first) while the second line already read the real name `img`.
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    """convert_to_negative should produce at least one non-zero pixel."""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    """change_contrast should return a 100x100 RGB PIL image."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel():
    """A 9x9 Gaussian kernel with sigma=1.4 should be fully non-zero."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    """Canny edge detection on the grayscale fixture yields some edges."""
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    """Gaussian filtering of the grayscale fixture should be fully non-zero."""
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    """Convolving the grayscale fixture with a Laplace kernel yields output."""
    # Laplace kernel with diagonal contributions.
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    # NOTE(review): `uinta` is this file's (mangled) alias for numpy's uint8
    # — confirm the import line at the top of the module.
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()
def test_median_filter():
    """A 3x3 median filter over the grayscale fixture yields output."""
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    """Sobel filtering returns non-trivial gradient magnitude and direction."""
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    """Sepia conversion of the color fixture should be fully non-zero."""
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    """Burkes dithering of the fixture produces a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path="digital_image_processing/image_data/lena_small.jpg"):
    """Nearest-neighbour resize to 400x200 produces a non-empty output."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    """Exercise get_neighbors_pixel and the full local-binary-pattern pass."""
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel. (The mangled original discarded each value into a
    # throwaway local instead of writing lbp_image[i][j].)
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 57
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# Module logger — restored name: set_recursively below calls `logger.info`.
logger = logging.get_logger("transformers.models.encodec")

# Key-remapping tables (original EnCodec checkpoint key -> HF key). The
# mangled original bound every table to one name and then referenced the
# real MAPPING_* names, which are restored here.
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
# Extra norm-layer mappings used only by the 48 kHz checkpoints.
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
# Combined tables for the 24 kHz (no norms) and 48 kHz (with norms) models.
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def lowerCAmelCase_(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the parameter of ``hf_pointer`` addressed by the dotted ``key``.

    Args:
        hf_pointer: root HF module to write into.
        key: dotted attribute path (e.g. ``"encoder.layers.0.conv"``).
        value: tensor from the original checkpoint.
        full_name: original parameter name, used only for error/log messages.
        weight_type: attribute of the target holding the tensor
            (``"weight"``, ``"weight_g"``, ``"bias"``, LSTM gate names, ...)
            or ``None`` to write into the target itself.

    Raises:
        ValueError: if the checkpoint tensor's shape does not match the target's.
    """
    # BUG FIX: the signature declared five parameters all named
    # `__lowerCamelCase` (a SyntaxError) — restored to the names the body uses.
    # Walk the dotted attribute path down to the target module / parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape check against the tensor we are about to overwrite.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )

    # BUG FIX: the original assigned `value` to a throwaway local in every
    # branch, so nothing was ever written into the model.  Write into the
    # `.data` of the addressed tensor instead (same branch set as the original
    # `elif` chain, checked by membership).
    known_weight_types = (
        "weight", "weight_g", "weight_v", "bias",
        "running_mean", "running_var", "num_batches_tracked",
        "weight_ih_l0", "weight_hh_l0", "bias_ih_l0", "bias_hh_l0",
        "weight_ih_l1", "weight_hh_l1", "bias_ih_l1", "bias_hh_l1",
    )
    if weight_type in known_weight_types:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def lowerCAmelCase_(name, ignore_keys):
    """Return True when checkpoint entry ``name`` matches any pattern in ``ignore_keys``.

    A pattern may be:
      * ``"prefix.*"``  — match names starting with ``"prefix."``;
      * ``"a.*.b"``     — match names containing both ``"a"`` and ``"b"``;
      * a plain string  — match names containing it as a substring.
    """
    # BUG FIX: the original signature declared both parameters with the same
    # name (a SyntaxError) while the body already used `name`/`ignore_keys`.
    for key in ignore_keys:
        if key.endswith(".*"):
            # `key[:-1]` keeps the trailing dot, so this is a prefix match.
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowerCAmelCase_(orig_dict, hf_model, model_name):
    """Rename and copy every tensor of an original Encodec state dict into ``hf_model``.

    Args:
        orig_dict: original checkpoint state dict (name -> tensor).
        hf_model: target HF ``EncodecModel``.
        model_name: one of ``"encodec_24khz"``, ``"encodec_32khz"``, ``"encodec_48khz"``.

    Raises:
        ValueError: for any other ``model_name``.
    """
    # BUG FIX: the original signature declared all three parameters with the
    # same name (a SyntaxError).
    unused_weights = []
    # BUG FIX: `model_name == "encodec_24khz" or "encodec_32khz"` was always
    # truthy (a non-empty string literal), so the 48 kHz mapping and the
    # unknown-model error were unreachable.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}' )

    # Checked from most to least specific so e.g. "weight_g" wins over "weight",
    # mirroring the order of the original elif chain.
    weight_type_order = (
        "weight_g", "weight_v",
        "weight_ih_l0", "weight_hh_l0", "bias_ih_l0", "bias_hh_l0",
        "weight_ih_l1", "weight_hh_l1", "bias_ih_l1", "bias_hh_l1",
        "bias", "weight", "running_mean", "running_var", "num_batches_tracked",
    )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'{name} was ignored' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # Recover the layer index from the original name.
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                weight_type = next((wt for wt in weight_type_order if wt in name), None)
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def lowerCAmelCase_(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original Encodec checkpoint into a HF ``EncodecModel`` dump.

    Args:
        model_name: ``"encodec_24khz"``, ``"encodec_32khz"`` or ``"encodec_48khz"``.
        checkpoint_path: path to the original ``torch.save`` checkpoint.
        pytorch_dump_folder_path: output folder for model + feature extractor.
        config_path: optional path to an existing HF config to start from.
        repo_id: optional hub repo to push the converted artifacts to.

    Raises:
        ValueError: for an unknown ``model_name``.
    """
    # BUG FIX: duplicate parameter names in the original signature (SyntaxError),
    # and every config field below was assigned to a dead local instead of the
    # config object.  Field names restored from the upstream conversion script —
    # NOTE(review): verify them against `EncodecConfig`.
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'Unknown model name: {model_name}' )

    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_snake_case : str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 123
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
# Module-level logger.  NOTE(review): annotated `str` but actually a `logging.Logger`.
a_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case ( lowerCAmelCase__ ):
    """Text-guided inpainting pipeline: a CLIPSeg segmentation model turns a text
    prompt into a mask, which is then fed to a Stable Diffusion inpainting
    pipeline.

    NOTE(review): this class is obfuscation-damaged — `__init__` and `__call__`
    declare many parameters all named `UpperCamelCase` (duplicate argument
    names are a SyntaxError), results are bound to the throwaway name
    `lowerCamelCase_`, and many call sites pass the undefined name `__a` where
    distinct values (e.g. `standard_warn=False`, a device, a FrozenDict) were
    presumably intended — confirm against the upstream community pipeline.
    Only documentation has been added here; the code is untouched.
    """
    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
        """Register sub-models; patch two deprecated scheduler config fields
        (`steps_offset`, `skip_prk_steps`) with a deprecation warning; warn when
        the safety checker is disabled.
        """
        super().__init__()
        # Old schedulers shipped with steps_offset == 0; warn and force it to 1.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            lowerCamelCase_ = (
                f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
            lowerCamelCase_ = dict(scheduler.config )
            lowerCamelCase_ = 1
            lowerCamelCase_ = FrozenDict(__a )
        # PNDM schedulers must skip the PRK warm-up steps for SD compatibility.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            lowerCamelCase_ = (
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
            lowerCamelCase_ = dict(scheduler.config )
            lowerCamelCase_ = True
            lowerCamelCase_ = FrozenDict(__a )
        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
    def snake_case ( self , UpperCamelCase = "auto" ):
        """Enable sliced attention computation; `"auto"` halves the head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase_ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__a )
    def snake_case ( self ):
        """Disable attention slicing (presumably by passing `None`; the original
        argument here is the undefined `__a` — TODO confirm)."""
        self.enable_attention_slicing(__a )
    def snake_case ( self ):
        """Offload all sub-models to CPU via accelerate to reduce GPU memory use.

        Raises ImportError when accelerate is not installed.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowerCamelCase_ = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(__a , __a )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def snake_case ( self ):
        """Return the device the pipeline actually executes on, accounting for
        accelerate hooks that may have moved modules off `self.device`."""
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ):
        """Segment the image with CLIPSeg using `text`, then run Stable Diffusion
        inpainting on the resulting mask and return its output."""
        lowerCamelCase_ = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        lowerCamelCase_ = self.segmentation_model(**__a )
        # Sigmoid over logits -> soft mask, converted to a PIL image at input size.
        lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowerCamelCase_ = self.numpy_to_pil(__a )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowerCamelCase_ = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
| 55
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Seq2seq-specific training arguments layered on top of `TrainingArguments`.

    NOTE(review): obfuscation damage — every field is named `__UpperCAmelCase`
    (so each annotation overwrites the previous one) and several `default=`
    values reference the base-class alias `lowerCAmelCase__` instead of real
    defaults.  The `metadata` strings still describe the intended fields:
    label smoothing, sortish sampler, predict-with-generate, adafactor,
    encoder/decoder layerdrop, dropout, attention dropout, lr scheduler.
    Code left untouched; confirm field names against the upstream
    `seq2seq_trainer` example before relying on this class.
    """
    # Label smoothing epsilon; 0.0 disables smoothing.
    __UpperCAmelCase : Optional[float] =field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    # Whether to use the SortishSampler.
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to SortishSamler or not."""} )
    # Whether to compute generative metrics (ROUGE, BLEU) via `generate`.
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    # Whether to use the Adafactor optimizer.
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """whether to use adafactor"""} )
    # Encoder layer dropout probability (forwarded to model.config).
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    # Decoder layer dropout probability (forwarded to model.config).
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    # Overall dropout probability (forwarded to model.config).
    __UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    # Attention dropout probability (forwarded to model.config).
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    # Which learning-rate scheduler to use (one of `arg_to_scheduler`'s keys).
    __UpperCAmelCase : Optional[str] =field(
        default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 57
| 0
|
def lowercase(first: int, second: int) -> int:
    '''Add two non-negative integers using only bitwise operations.

    Classic carry propagation: XOR adds without carry, AND<<1 is the carry;
    repeat until no carry remains.  (Negative inputs would never terminate
    with Python's unbounded ints, so callers should pass non-negatives.)

    >>> lowercase(3, 5)
    8
    '''
    # BUG FIX: the original declared both parameters with the same name
    # (SyntaxError), referenced an undefined `c`, and never updated `second`
    # (infinite loop).
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("""Enter the first number: """).strip())
_SCREAMING_SNAKE_CASE = int(input("""Enter the second number: """).strip())
print(F'''{add(first, second) = }''')
| 343
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
A : Optional[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A : Optional[int] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A : List[Any] = "README.md"
def _lowerCamelCase(fname, version, pattern):
    '''Rewrite the version string in `fname` using the regex registered under `pattern`.

    Args:
        fname: file to rewrite in place.
        version: version string substituted for the "VERSION" placeholder.
        pattern: key into the module-level `REPLACE_PATTERNS` table.
    '''
    # BUG FIX: the original declared all three parameters with the same name
    # (SyntaxError) and then read the undefined names `pattern`/`replace`.
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def _lowerCamelCase(version):
    '''Update the `check_min_version(...)` pin in every maintained example script.

    Walks the examples tree, skipping unmaintained subtrees, and applies the
    "examples" replacement pattern to each Python file found.
    '''
    # NOTE(review): the original walked its single argument; the argument is the
    # version string, so the intended walk root is the examples directory.
    for folder, directories, fnames in os.walk("examples/"):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def _lowerCamelCase(version, patch=False):
    '''Propagate `version` into every tracked file; skip examples on patch releases.

    Args:
        version: new version string.
        patch: when True, example pins are left untouched.
    '''
    # BUG FIX: duplicate parameter names in the original signature (SyntaxError)
    # and positional arguments replaced by a single repeated placeholder.
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def _lowerCamelCase():
    '''Point model-doc links in the README at the stable docs instead of `main`.

    Scans the architecture list between the two prompt lines and rewrites each
    `main/model_doc` URL to the released-docs URL.
    '''
    # BUG FIX: the original bound both prompts to the same throwaway local and
    # then read the undefined names `_start_prompt`/`_end_prompt`/`lines`/`index`.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open("README.md", "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open("README.md", "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def _lowerCamelCase():
    '''Return the current library version parsed from the package `__init__.py`.

    Reads the file registered under REPLACE_FILES["init"], extracts the
    `__version__` string with the "init" pattern, and returns it as a
    `packaging.version.Version`.
    '''
    # BUG FIX: the original overwrote one throwaway local twice and returned it.
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def _lowerCamelCase(patch=False):
    '''Interactive pre-release step: pick the release version and stamp it everywhere.

    Args:
        patch: True for a patch release (micro bump, examples untouched).

    Raises:
        ValueError: when asked for a patch release from a dev branch.
    '''
    # BUG FIX: every local was bound to the same throwaway name while the
    # f-strings and calls below read `default_version`/`version`.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def _lowerCamelCase():
    '''Post-release step: bump to the next dev version and restore README links.'''
    # BUG FIX: locals restored — the original bound everything to one throwaway
    # name while reading `dev_version`/`version` below.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 57
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def a_(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str):
    '''Convert a TensorFlow LXMERT checkpoint into a PyTorch `LxmertForPreTraining` dump.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        config_file: path to the model's `config.json`.
        pytorch_dump_path: where to `torch.save` the converted state dict.
    '''
    # BUG FIX: the original declared three parameters with the same name
    # (SyntaxError) and passed a single undefined placeholder everywhere.
    config = LxmertConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 199
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCamelCase_(pl.LightningModule):
    '''Minimal LightningModule wrapper holding a Longformer plus a QA output head.

    Used only as a container so a Lightning checkpoint's `state_dict` can be
    loaded and its weights transferred to `LongformerForQuestionAnswering`.
    '''

    def __init__(self, model):
        '''Wrap `model` (a `LongformerModel`) and attach a 2-label QA head.'''
        super().__init__()
        # BUG FIX: the original bound all three values to throwaway locals, so
        # `self.model` / `self.num_labels` / `self.qa_outputs` (read by the
        # conversion function below) never existed.
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def lowercase_(self):
        '''Intentionally a no-op — training is not needed for checkpoint conversion.'''
        pass
def a__(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    '''Convert a PyTorch-Lightning Longformer-QA checkpoint into a HF model dump.

    Args:
        longformer_model: HF model identifier of the base Longformer.
        longformer_question_answering_ckpt_path: path to the Lightning checkpoint.
        pytorch_dump_folder_path: output folder for `save_pretrained`.
    '''
    # BUG FIX: the original declared three identically-named parameters
    # (SyntaxError) and loaded everything through one undefined placeholder.
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = lowerCamelCase_(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('''cpu'''))
    lightning_model.load_state_dict(ckpt['''state_dict'''])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 181
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ):
'''simple docstring'''
try:
__lowerCAmelCase = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__lowerCAmelCase = 2
__lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCAmelCase = i
while n % i == 0:
__lowerCAmelCase = n // i
i += 1
return int(_UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 57
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
a :Any = False
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
return 12
@property
def _a ( self ) -> str:
"""simple docstring"""
return 12
@property
def _a ( self ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _a ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(__a )
@property
def _a ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 12
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
SCREAMING_SNAKE_CASE__ : List[Any] = TransformeraDModel(**__a )
return model
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """cpu"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_vqvae
SCREAMING_SNAKE_CASE__ : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : str = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_transformer
SCREAMING_SNAKE_CASE__ : List[Any] = VQDiffusionScheduler(self.num_embed )
SCREAMING_SNAKE_CASE__ : List[str] = LearnedClassifierFreeSamplingEmbeddings(learnable=__a )
SCREAMING_SNAKE_CASE__ : Dict = VQDiffusionPipeline(
vqvae=__a , text_encoder=__a , tokenizer=__a , transformer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
SCREAMING_SNAKE_CASE__ : Tuple = """teddy bear playing in the pool"""
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=__a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe(
[prompt] , generator=__a , output_type="""np""" , return_dict=__a , num_inference_steps=2 )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
    """Same smoke test as above but with embeddings constructed with an explicit
    hidden size and length (the learnable-embeddings variant).

    NOTE(review): machine-mangled like its sibling — ``SCREAMING_SNAKE_CASE__``
    swallows every assignment target and ``__a``/``tokenizer``/``pipe`` are
    referenced without being bound.  Verify against the original diffusers test.
    """
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cpu"""
    SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_vqvae
    SCREAMING_SNAKE_CASE__ : Dict = self.dummy_text_encoder
    SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_tokenizer
    SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_transformer
    SCREAMING_SNAKE_CASE__ : str = VQDiffusionScheduler(self.num_embed )
    SCREAMING_SNAKE_CASE__ : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
        learnable=__a , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
    SCREAMING_SNAKE_CASE__ : str = VQDiffusionPipeline(
        vqvae=__a , text_encoder=__a , tokenizer=__a , transformer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.to(__a )
    pipe.set_progress_bar_config(disable=__a )
    SCREAMING_SNAKE_CASE__ : Dict = """teddy bear playing in the pool"""
    SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=__a ).manual_seed(0 )
    SCREAMING_SNAKE_CASE__ : Any = pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="""np""" )
    SCREAMING_SNAKE_CASE__ : Tuple = output.images
    SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device=__a ).manual_seed(0 )
    SCREAMING_SNAKE_CASE__ : Optional[int] = pipe(
        [prompt] , generator=__a , output_type="""np""" , return_dict=__a , num_inference_steps=2 )[0]
    SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
    SCREAMING_SNAKE_CASE__ : int = image_from_tuple[0, -3:, -3:, -1]
    assert image.shape == (1, 24, 24, 3)
    SCREAMING_SNAKE_CASE__ : str = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
    # Looser tolerance (2.0) on the direct path than the tuple path (1e-2).
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
    assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __a (unittest.TestCase):
    '''Slow integration test: run the published microsoft/vq-diffusion-ithq
    checkpoint end-to-end and compare against a reference image from the Hub.

    NOTE(review): machine-mangled — both methods are named ``_a`` (the second
    shadows the first, so tearDown is lost), and ``pipeline``/``output``/
    ``image``/``expected_image`` are referenced without being bound.  Compare
    with the original diffusers test before use.'''
    def _a ( self ) -> Optional[int]:
        # tearDown: free CUDA memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _a ( self ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE__ : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
        SCREAMING_SNAKE_CASE__ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
        SCREAMING_SNAKE_CASE__ : Optional[int] = pipeline.to(__a )
        pipeline.set_progress_bar_config(disable=__a )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=__a ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Tuple = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__a , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : Any = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 132
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Config tester: exercises a PretrainedConfig subclass's common properties,
    JSON round-trips, save/load round-trips, and num_labels handling.

    NOTE(review): this copy appears machine-mangled — ``__init__`` repeats the
    parameter name ``__a`` (a SyntaxError in Python), every helper is named
    ``snake_case`` so later definitions shadow earlier ones, and many bodies
    reference names (``parent``, ``config``, ``config_first`` ...) that are
    never bound because the assignment targets were rewritten to
    ``__lowerCAmelCase``.  Verify against the original transformers
    ``ConfigTester`` before relying on any of this.'''
    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        # Expected parameters (per the original): parent test case, config class,
        # has_text_modality flag, common_properties override, plus config kwargs.
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties
    def snake_case ( self ):
        # Check the config exposes the common properties as getters/setters and
        # accepts each of them as an init kwarg.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def snake_case ( self ):
        # to_json_string round-trip: every input kwarg must appear in the JSON.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )
    def snake_case ( self ):
        # to_json_file / from_json_file round-trip.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # save_pretrained / from_pretrained round-trip.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # save_pretrained / from_pretrained round-trip through a subfolder.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # num_labels drives the sizes of the id/label mappings, and updating it
        # after construction must resize them.
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )
    def snake_case ( self ):
        # A composition config cannot be default-constructed; otherwise a
        # no-argument construction must succeed.
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )
    def snake_case ( self ):
        # Every common kwarg passed at init must be reflected on the instance;
        # collect mismatches and fail with one aggregated error message.
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
    def snake_case ( self ):
        # Entry point: run the full battery of config checks.
        # NOTE(review): none of these helper names exist on this class as written
        # (all helpers were renamed to ``snake_case``) — confirm upstream.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
|
"""simple docstring"""
# Pinned dependency specifiers used by diffusers' setup tooling:
# keys are bare package names, values are pip requirement strings.
A : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ ):
return "".join(chr(ord(_UpperCamelCase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 96
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger (transformers' logging wrapper around stdlib logging).
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Byte-level (ByT5-style) tokenizer: the vocabulary is the 256 raw UTF-8
    byte values plus pad/eos/unk special tokens and optional ``<extra_id_*>``
    sentinel tokens at the top of the id space.

    NOTE(review): this copy looks machine-mangled — several ``def``s repeat the
    parameter name ``__a`` (a SyntaxError in Python), a call below repeats the
    keyword ``token_ids_a`` (also a SyntaxError), most methods share the name
    ``snake_case`` so later definitions shadow earlier ones, and assignment
    targets were collapsed to ``__lowerCAmelCase`` leaving names such as
    ``extra_ids``/``token_ids``/``tokens`` unbound.  Compare against the
    original ``ByT5Tokenizer`` before relying on any of it.'''
    # Model input names produced by this tokenizer.
    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        # Wrap plain-string special tokens in AddedToken (no stripping).
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8 # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        for i, token in enumerate(__a ):
            # Sentinel tokens occupy the very top of the id space.
            __lowerCAmelCase = self.vocab_size + i - n
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def snake_case ( self ):
        # vocab size = 256 byte values + special tokens + sentinel ids.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def snake_case ( self , __a , __a = None , __a = False ):
        # get_special_tokens_mask: 1 marks special tokens (the trailing eos).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
    def snake_case ( self , __a ):
        # Append eos unless the sequence already ends with it (warn in that case).
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def snake_case ( self , __a , __a = None ):
        # create_token_type_ids_from_sequences: this model uses no token types,
        # so the mask is all zeros over the eos-terminated sequence(s).
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def snake_case ( self , __a , __a = None ):
        # build_inputs_with_special_tokens: ``X</s>`` or ``X</s>Y</s>``.
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a
    def snake_case ( self , __a ):
        # _tokenize: one token per UTF-8 byte of the input text.
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens
    def snake_case ( self , __a ):
        # _convert_token_to_id: specials first, then added tokens, then the raw
        # byte value offset by the number of special tokens.
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id
    def snake_case ( self , __a ):
        # _convert_id_to_token: inverse of the mapping above.
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token
    def snake_case ( self , __a ):
        # convert_tokens_to_string: re-assemble the byte string and decode it as
        # UTF-8, dropping undecodable sequences.
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string
    def snake_case ( self , __a , __a = None ):
        # save_vocabulary: a byte tokenizer has no vocabulary file to write.
        return ()
| 57
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
# Verbose logging for the example-script integration tests below; the root
# logger gets a stdout handler attached per-test in the test class.
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
    """Parse the ``-f`` flag from the process command line and return its value.

    (pytest passes ``-f`` when invoking test files; this swallows it.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    parsed = parser.parse_args()
    return parsed.f
class lowercase ( lowerCAmelCase__ ):
    '''DeeBERT example-script integration tests: two-stage training on the MRPC
    fixtures, then evaluation with and without an early-exit entropy threshold.

    NOTE(review): machine-mangled copy — assignment targets were rewritten to
    ``_A`` and call arguments to ``__a``, so ``__a``/``n_gpu``/``args``/
    ``result`` and the helper name ``run_and_check`` are referenced without
    being bound/defined here.  Compare with the original transformers deebert
    test before use.'''
    def a__ ( self ) -> Union[str, Any]:
        # setup: mirror script logging onto stdout so pytest captures it.
        _A : Any = logging.StreamHandler(sys.stdout )
        logger.addHandler(__a )
    def a__ ( self , _a ) -> Tuple:
        # run_and_check: run run_glue_deebert.main() with a patched argv and
        # assert every reported metric is at least 0.666.
        _A : Any = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""" )
            with patch.object(__a , """argv""" , __a ):
                _A : Dict = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(__a , 0.666 )
    @slow
    @require_torch_non_multi_gpu
    def a__ ( self ) -> List[str]:
        # End-to-end: two-stage training, plain eval per highway, then
        # entropy-thresholded early-exit eval.
        _A : Any = """\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n """.split()
        self.run_and_check(__a )
        _A : List[str] = """\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n """.split()
        self.run_and_check(__a )
        _A : Optional[Any] = """\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n """.split()
        self.run_and_check(__a )
| 26
|
"""simple docstring"""
import numpy
# List of input, output pairs
# NOTE(review): every constant below is bound to ``A`` (mangled), so the names
# the functions in this module rely on (train_data, test_data,
# parameter_vector, m, LEARNING_RATE) are never defined — ``len(train_data)``
# below would raise NameError at import time.  Verify against the original
# gradient-descent example.
A : Any = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
# Initial parameter vector: theta0 (bias) followed by one weight per feature.
A : Union[str, Any] = [2, 4, 1, 5]
A : int = len(train_data)
A : Dict = 0.009
def _lowerCamelCase ( example_no , data_set="train" ):
    """Return the prediction error (hypothesis minus actual output) for one example.

    :param example_no: index of the example within the chosen data set
    :param data_set: ``"train"`` or ``"test"``
    """
    # Fix: the original signature repeated the parameter name ``_UpperCamelCase``
    # twice, which is a SyntaxError in Python.
    # NOTE(review): ``calculate_hypothesis_value`` and ``output`` are expected to
    # be sibling helpers in this module — confirm they exist under those names.
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _lowerCamelCase ( data_input_tuple ):
    """Compute the linear hypothesis value for one input tuple:
    theta0 + sum(theta[i+1] * x[i]) over all features.

    :param data_input_tuple: tuple of feature values for one example
    """
    # Fix: the original body referenced undefined ``data_input_tuple`` while the
    # parameter was named ``_UpperCamelCase``, and its loop bound
    # ``len(<input>) - 1`` skipped the last feature; iterate once per weight.
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def _lowerCamelCase ( example_no , data_set ):
    """Return the recorded actual output for the given example.

    :param example_no: index into the chosen data set
    :param data_set: ``"train"`` or ``"test"``
    :return: the example's target value, or None for an unknown data_set name
    """
    # Fix: the original signature repeated the parameter name ``_UpperCamelCase``
    # twice — a SyntaxError in Python.
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def _lowerCamelCase ( example_no , data_set ):
    """Return the hypothesis value for the given example from the chosen data set.

    :param example_no: index into the chosen data set
    :param data_set: ``"train"`` or ``"test"``
    :return: hypothesis value, or None for an unknown data_set name
    """
    # Fix: the original signature repeated the parameter name ``_UpperCamelCase``
    # twice — a SyntaxError in Python.
    # NOTE(review): ``_hypothesis_value`` is expected to be a sibling helper in
    # this module — confirm it exists under that name.
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def _lowerCamelCase ( index , end=m ):
    """Sum the cost-function derivative contribution over the first *end* training
    examples.

    ``index == -1`` corresponds to the bias parameter theta0 (plain error sum);
    otherwise each error is weighted by that example's *index*-th input feature.
    """
    # Fixes: the original signature repeated ``_UpperCamelCase`` (SyntaxError),
    # and ``_error`` was called with the undefined outer name instead of the
    # loop variable ``i``.
    # NOTE(review): ``_error`` is expected to be a sibling helper — confirm.
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def _lowerCamelCase ( index ):
    """Return the partial derivative of the cost with respect to parameter *index*,
    averaged over the m training examples."""
    # Fix: the original passed its single parameter twice to
    # summation_of_cost_derivative; the second argument is the example count m.
    # NOTE(review): ``summation_of_cost_derivative`` and ``m`` are expected to be
    # module-level siblings — confirm they exist under those names.
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def _lowerCamelCase ( ):
    """Run batch gradient descent, updating the global ``parameter_vector`` in
    place until successive iterates are numerically close, then print the
    iteration count."""
    # Fixes: the original collapsed all assignment targets to ``__lowerCAmelCase``,
    # leaving ``j``, ``temp_parameter_vector`` and ``cost_derivative`` undefined,
    # and iterated over ``len(<undefined>)``.
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            # index i - 1 == -1 selects the bias term's derivative
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
    """Print the actual versus predicted output for every example in the test set."""
    # Fix: the original used the undefined name ``_UpperCamelCase`` both as the
    # loop bound and as the example index; iterate over the test data instead.
    # NOTE(review): ``output`` and ``calculate_hypothesis_value`` are expected to
    # be sibling helpers in this module — confirm they exist under those names.
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
    # NOTE(review): ``run_gradient_descent`` / ``test_gradient_descent`` are not
    # defined under these names in this module as written (the helpers above
    # were all renamed) — confirm against the original example script.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 57
| 0
|
# Versatile Diffusion public API: import the real pipelines when both torch and
# a new-enough transformers (>= 4.25.0) are installed, otherwise fall back to
# the dummy placeholder objects that raise a helpful error on use.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing/old dependencies: expose dummies with the same class names.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 140
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    '''ChineseCLIPProcessor tests: tokenizer + image-processor wiring,
    save/load round-trips, and batch_decode passthrough.

    NOTE(review): machine-mangled copy — every assignment target was rewritten
    to ``__lowerCAmelCase`` and call arguments to ``__a``, so the bodies
    reference names (``vocab_tokens``, ``processor``, ``tokenizer`` ...) that
    are never bound, and every method is named ``snake_case`` so later
    definitions shadow earlier ones.  Compare with the original transformers
    test before relying on any of it.'''
    def snake_case ( self ):
        # setUp: write a tiny vocab file and an image-processor config to a tmpdir.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )
    def snake_case ( self , **__a ):
        # Slow tokenizer factory bound to the tmpdir fixture.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Fast (rust) tokenizer factory bound to the tmpdir fixture.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Image-processor factory bound to the tmpdir fixture.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self ):
        # tearDown: remove the temp directory.
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        # Build one random 30x400 RGB PIL image as pipeline input.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        # Round-trip: save processors built with slow/fast tokenizers, reload,
        # compare vocabs, image-processor JSON, and component types.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )
    def snake_case ( self ):
        # from_pretrained with overridden kwargs must pick up the new special
        # tokens and image-processor flags.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )
    def snake_case ( self ):
        # Image path: processor(images=...) must match the bare image processor.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def snake_case ( self ):
        # Text path: processor(text=...) must match the bare tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def snake_case ( self ):
        # Combined text+image call returns all expected keys; empty call raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()
    def snake_case ( self ):
        # batch_decode is forwarded verbatim to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )
    def snake_case ( self ):
        # Keys of a combined call line up with processor.model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# NOTE(review): the six dicts below were presumably distinct named constants
# (small / ImageNet-64 / LSUN-256 UNet configs and three scheduler configs);
# in this copy they are all bound to the same mangled name ``__a``, so each
# assignment shadows the previous one.  Verify the intended names upstream.
# UNet config for 32x32 (CIFAR-10-sized) consistency-model checkpoints.
__a : List[str] = {
    "sample_size": 3_2,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1_0_0_0,
    "block_out_channels": [3_2, 6_4],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# UNet config for 64x64 (class-conditional ImageNet) checkpoints.
__a : Tuple = {
    "sample_size": 6_4,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1_0_0_0,
    "block_out_channels": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
    "attention_head_dim": 6_4,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# UNet config for 256x256 (unconditional LSUN) checkpoints.
__a : Dict = {
    "sample_size": 2_5_6,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
    "attention_head_dim": 6_4,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# CMStochasticIterativeScheduler configs (differ only in num_train_timesteps).
__a : Optional[Any] = {
    "num_train_timesteps": 4_0,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
__a : List[str] = {
    "num_train_timesteps": 2_0_1,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
__a : Optional[Any] = {
    "num_train_timesteps": 1_5_1,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def UpperCAmelCase ( lowercase ):
    """Parse a command-line boolean flag (for use as an argparse ``type=``).

    Accepts an actual bool (returned unchanged) or the usual textual spellings,
    case-insensitively: yes/true/t/y/1 -> True, no/false/f/n/0 -> False.

    :raises argparse.ArgumentTypeError: for any other value.
    """
    # Fix: the original body referenced the undefined names ``_UpperCamelCase``
    # and ``v`` instead of the parameter, raising NameError on every call.
    if isinstance(lowercase , bool ):
        return lowercase
    if lowercase.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif lowercase.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''' )
def UpperCAmelCase ( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    """Copy one resnet block's weights from a consistency-model checkpoint into
    the diffusers ResnetBlock2D naming scheme.

    :param checkpoint: source state dict (keys under ``{old_prefix}``)
    :param new_checkpoint: destination state dict, mutated in place
    :param old_prefix: key prefix in the source checkpoint
    :param new_prefix: key prefix to write in the destination
    :param has_skip: also map the skip connection to ``conv_shortcut``
    :return: ``new_checkpoint`` (same object, for chaining)
    """
    # Fixes: the original signature repeated the parameter name ``lowercase``
    # five times (a SyntaxError), and the destination keys were lost — every
    # read was assigned to the throwaway name ``__lowercase``.  The key mapping
    # below follows the diffusers consistency-model conversion script
    # (in_layers -> norm1/conv1, emb_layers -> time_emb_proj,
    #  out_layers -> norm2/conv2, skip_connection -> conv_shortcut).
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def UpperCAmelCase ( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim=None ):
    """Copy one attention block's weights from a consistency-model checkpoint
    into the diffusers Attention naming scheme, splitting the fused qkv
    projection and dropping the trailing 1x1-conv singleton dims.

    :param checkpoint: source state dict (keys under ``{old_prefix}``)
    :param new_checkpoint: destination state dict, mutated in place
    :param old_prefix: key prefix in the source checkpoint
    :param new_prefix: key prefix to write in the destination
    :param attention_head_dim: unused here; kept for signature parity with the
        upstream conversion helpers
    :return: ``new_checkpoint`` (same object, for chaining)
    """
    # Fixes: the original signature repeated the parameter name ``lowercase``
    # five times (a SyntaxError), and every destination key was lost — each
    # value was assigned to the throwaway name ``__lowercase``.  The mapping
    # below follows the diffusers conversion script
    # (qkv -> to_q/to_k/to_v, norm -> group_norm, proj_out -> to_out.0).
    weight_q , weight_k , weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    # squeeze the 1x1 conv kernel dims so the weights fit nn.Linear layers
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def UpperCAmelCase ( lowercase , lowercase ):
    """Convert a consistency-model U-Net checkpoint into the diffusers layout.

    Walks the down blocks, the hardcoded mid block, and the up blocks, delegating
    per-module weight copying to ``convert_resnet``/``convert_attention`` helpers.

    NOTE(review): machine-mangled -- both parameters are named ``lowercase``
    (duplicate argument names are a SyntaxError), the body reads
    ``checkpoint``/``unet_config``/``channels_list``/``current_layer`` etc. that
    are never bound, and every assignment targets the same local ``__lowercase``.
    The control flow below documents the *intended* structure only.
    """
    __lowercase = torch.load(_UpperCamelCase , map_location='''cpu''' )
    __lowercase = {}
    # time embedding MLP (two linear layers)
    __lowercase = checkpoint['''time_embed.0.weight''']
    __lowercase = checkpoint['''time_embed.0.bias''']
    __lowercase = checkpoint['''time_embed.2.weight''']
    __lowercase = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        # class-conditional models carry an extra label embedding table
        __lowercase = checkpoint['''label_emb.weight''']
    __lowercase = checkpoint['''input_blocks.0.0.weight''']
    __lowercase = checkpoint['''input_blocks.0.0.bias''']
    __lowercase = unet_config['''down_block_types''']
    __lowercase = unet_config['''layers_per_block''']
    __lowercase = unet_config['''attention_head_dim''']
    __lowercase = unet_config['''block_out_channels''']
    # input_blocks.0 is the stem conv, so per-block numbering starts at 1
    __lowercase = 1
    __lowercase = channels_list[0]
    for i, layer_type in enumerate(_UpperCamelCase ):
        __lowercase = channels_list[i]
        # a skip connection is only present when the channel count changes
        __lowercase = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(_UpperCamelCase ):
                __lowercase = F"down_blocks.{i}.resnets.{j}"
                __lowercase = F"input_blocks.{current_layer}.0"
                __lowercase = True if j == 0 and downsample_block_has_skip else False
                __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(_UpperCamelCase ):
                __lowercase = F"down_blocks.{i}.resnets.{j}"
                __lowercase = F"input_blocks.{current_layer}.0"
                __lowercase = True if j == 0 and downsample_block_has_skip else False
                __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
                __lowercase = F"down_blocks.{i}.attentions.{j}"
                __lowercase = F"input_blocks.{current_layer}.1"
                __lowercase = convert_attention(
                    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                current_layer += 1
        if i != len(_UpperCamelCase ) - 1:
            # every down block except the last ends with a downsampler resnet
            __lowercase = F"down_blocks.{i}.downsamplers.0"
            __lowercase = F"input_blocks.{current_layer}.0"
            __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            current_layer += 1
        __lowercase = current_channels
    # hardcoded the mid-block for now
    __lowercase = '''mid_block.resnets.0'''
    __lowercase = '''middle_block.0'''
    __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    __lowercase = '''mid_block.attentions.0'''
    __lowercase = '''middle_block.1'''
    __lowercase = convert_attention(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    __lowercase = '''mid_block.resnets.1'''
    __lowercase = '''middle_block.2'''
    __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    __lowercase = 0
    __lowercase = unet_config['''up_block_types''']
    for i, layer_type in enumerate(_UpperCamelCase ):
        if layer_type == "ResnetUpsampleBlock2D":
            # up blocks have one extra resnet (layers_per_block + 1)
            for j in range(layers_per_block + 1 ):
                __lowercase = F"up_blocks.{i}.resnets.{j}"
                __lowercase = F"output_blocks.{current_layer}.0"
                __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
                current_layer += 1
            if i != len(_UpperCamelCase ) - 1:
                __lowercase = F"up_blocks.{i}.upsamplers.0"
                __lowercase = F"output_blocks.{current_layer-1}.1"
                __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                __lowercase = F"up_blocks.{i}.resnets.{j}"
                __lowercase = F"output_blocks.{current_layer}.0"
                __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
                __lowercase = F"up_blocks.{i}.attentions.{j}"
                __lowercase = F"output_blocks.{current_layer}.1"
                __lowercase = convert_attention(
                    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                current_layer += 1
            if i != len(_UpperCamelCase ) - 1:
                # attention up blocks keep the upsampler at slot .2 (after the attention)
                __lowercase = F"up_blocks.{i}.upsamplers.0"
                __lowercase = F"output_blocks.{current_layer-1}.2"
                __lowercase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    # final norm + output conv
    __lowercase = checkpoint['''out.0.weight''']
    __lowercase = checkpoint['''out.0.bias''']
    __lowercase = checkpoint['''out.2.weight''']
    __lowercase = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry point: convert a consistency-model ``unet.pt`` into a diffusers
    # ConsistencyModelPipeline and save it to --dump_path.
    # NOTE(review): mangled -- the parser is bound to ``__a`` but used as
    # ``parser``; likewise ``args``/``ckpt_name``/``unet_config`` etc. are read
    # but never bound under those names, and ``strabool``/``con_pt_to_diffuser``/
    # ``UNetaDModel``/the *_CONFIG constants must come from the (unseen) top of
    # the file.  As written this raises NameError immediately.
    __a : Dict = argparse.ArgumentParser()
    parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
    )
    parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    __a : Optional[Any] = parser.parse_args()
    __a : int = strabool(args.class_cond)
    __a : Dict = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config -- selected by substring-matching the checkpoint filename
    if "imagenet64" in ckpt_name:
        __a : Union[str, Any] = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __a : Any = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        __a : Union[str, Any] = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        # unconditional model: drop the class-embedding table from the config
        __a : Union[str, Any] = None
    __a : int = con_pt_to_diffuser(args.unet_path, unet_config)
    __a : Tuple = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config -- distilled ("cd") vs trained ("ct") checkpoints
    if "cd" in ckpt_name or "test" in ckpt_name:
        __a : Union[str, Any] = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        __a : Any = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __a : Any = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    __a : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
    __a : Optional[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 210
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( _UpperCamelCase = 4 ):
'''simple docstring'''
__lowerCAmelCase = abs(_UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCamelCase )] for y in range(_UpperCamelCase )]
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate the matrix 90 degrees counterclockwise: reverse rows of the transpose.

    NOTE(review): ``reverse_row`` and ``transpose`` are not defined under those
    names in this file -- the sibling helpers were all renamed ``_lowerCamelCase``
    by the mangling, so this call raises NameError.  Confirm the intended helpers.
    """
    return reverse_row(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_column(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate the matrix 180 degrees: reverse both row order and each row.

    NOTE(review): ``reverse_row`` and ``reverse_column`` are not defined under
    those names in this file (mangled helper names) -- raises NameError as is.
    """
    return reverse_row(reverse_column(_UpperCamelCase ) )
    # OR.. reverse_column(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate the matrix 270 degrees counterclockwise: reverse columns of the transpose.

    NOTE(review): ``reverse_column`` and ``transpose`` are not defined under
    those names in this file (mangled helper names) -- raises NameError as is.
    """
    return reverse_column(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [list(_UpperCamelCase ) for x in zip(*_UpperCamelCase )]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = matrix[::-1]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for i in matrix:
print(*_UpperCamelCase )
if __name__ == "__main__":
    # Demo: build a matrix, print it, then print each rotation.
    # NOTE(review): mangled -- ``make_matrix``/``print_matrix``/``rotate_aa``/
    # ``rotate_aaa`` are not defined under these names above (every helper was
    # renamed ``_lowerCamelCase``), and the results are bound to ``A`` while
    # ``matrix`` is read.  As written this raises NameError immediately.
    A : Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    A : List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    A : str = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 57
| 0
|
from ..utils import DummyObject, requires_backends
# ---------------------------------------------------------------------------
# Dummy placeholder objects for when the PyTorch backend is unavailable
# (the diffusers/transformers "dummy_pt_objects" pattern).
# NOTE(review): this section is machine-mangled -- every class is named ``a``
# (each definition shadows the previous one), the metaclass name
# ``lowerCAmelCase__`` is not defined in this file (the import above brings in
# ``DummyObject``), and both classmethods share the name ``__snake_case``.
# Restore the original identifiers before relying on this code.
# ---------------------------------------------------------------------------
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Optional[int] = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> List[str]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Any , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> Tuple:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> Tuple:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Tuple = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Any ) -> List[str]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : List[Any] , **lowerCamelCase : Dict ) -> int:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : int , **lowerCamelCase : int ) -> Optional[Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : str , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[str] ) -> Any:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[int] , *lowerCamelCase : int , **lowerCamelCase : List[Any] ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : List[str] = ["""torch"""]
    def __init__( self : Optional[int] , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> Optional[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : str ) -> int:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Any ) -> Any:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Optional[Any] = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCamelCase : int , **lowerCamelCase : Dict ) -> List[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[int] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> Tuple:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> Union[str, Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Any = ["""torch"""]
    def __init__( self : str , *lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] ) -> int:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> List[Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> int:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Dict = ["""torch"""]
    def __init__( self : Optional[Any] , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> List[str]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : str ) -> int:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Any , *lowerCamelCase : List[Any] , **lowerCamelCase : int ) -> Tuple:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : int = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> Any:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> Any:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> List[Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : List[Any] = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> List[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[int] ) -> Tuple:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Any , *lowerCamelCase : Any , **lowerCamelCase : int ) -> str:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Optional[Any] = ["""torch"""]
    def __init__( self : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> List[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ) -> int:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Dict:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : int = ["""torch"""]
    def __init__( self : List[str] , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> List[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Dict ) -> Dict:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : List[str] , **lowerCamelCase : Tuple ) -> str:
        requires_backends(cls , ["torch"] )
# NOTE(review): seven identically named module-level placeholder functions --
# each definition shadows the previous one, so only the last binding survives.
# Each body passes the undefined name ``_UpperCamelCase`` (mangled; the original
# presumably passed the function itself) to requires_backends -> NameError when
# called.  Restore the original function names and arguments before use.
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
    requires_backends(_UpperCamelCase , ["torch"] )
# NOTE(review): continuation of the mangled dummy-object section -- every class
# is named ``a`` (shadowing) and the metaclass name ``lowerCAmelCase__`` is not
# defined in this file; see the note at the start of the section.
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : str , *lowerCamelCase : int , **lowerCamelCase : Any ) -> str:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : int , **lowerCamelCase : List[str] ) -> str:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : List[Any] ) -> Tuple:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : List[Any] = ["""torch"""]
    def __init__( self : str , *lowerCamelCase : Dict , **lowerCamelCase : Optional[int] ) -> str:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : List[str] , **lowerCamelCase : Any ) -> Any:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> Tuple:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Tuple = ["""torch"""]
    def __init__( self : Any , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> Union[str, Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : str ) -> str:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : int = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Any ) -> Optional[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : str , **lowerCamelCase : str ) -> Tuple:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[int] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Optional[int] ) -> int:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : List[str] = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : Optional[Any] , **lowerCamelCase : int ) -> List[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> List[str]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCamelCase : Any , **lowerCamelCase : str ) -> Union[str, Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> List[Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : int , *lowerCamelCase : List[Any] , **lowerCamelCase : str ) -> int:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Any = ["""torch"""]
    def __init__( self : int , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Tuple ) -> Union[str, Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : List[str] , **lowerCamelCase : Dict ) -> Any:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Tuple , *lowerCamelCase : str , **lowerCamelCase : str ) -> List[str]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : int , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[int] ) -> Optional[int]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : Tuple , **lowerCamelCase : List[str] ) -> List[str]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : str = ["""torch"""]
    def __init__( self : int , *lowerCamelCase : Optional[int] , **lowerCamelCase : List[str] ) -> Optional[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : int , **lowerCamelCase : int ) -> Tuple:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> str:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Tuple = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> Dict:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] ) -> Any:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[int] , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> List[str]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : str = ["""torch"""]
    def __init__( self : Optional[int] , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> Any:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : str , **lowerCamelCase : Optional[int] ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : Dict , **lowerCamelCase : int ) -> Any:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : str = ["""torch"""]
    def __init__( self : Optional[int] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Tuple ) -> List[str]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
# NOTE(review): continuation of the mangled dummy-object section -- every class
# is named ``a`` (shadowing) and the metaclass name ``lowerCAmelCase__`` is not
# defined in this file; see the note at the start of the section.
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : int , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> str:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : str , **lowerCamelCase : Optional[int] ) -> List[Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> int:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : Optional[Any] , *lowerCamelCase : str , **lowerCamelCase : Any ) -> Any:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> List[Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : List[str] , **lowerCamelCase : Optional[int] ) -> Optional[int]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Tuple = ["""torch"""]
    def __init__( self : List[str] , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Any , *lowerCamelCase : Any , **lowerCamelCase : int ) -> str:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : int , *lowerCamelCase : str , **lowerCamelCase : List[Any] ) -> Optional[Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : int = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> Tuple:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[str] ) -> int:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : int , **lowerCamelCase : Optional[Any] ) -> Optional[Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : str = ["""torch"""]
    def __init__( self : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : List[str] ) -> Tuple:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : str , **lowerCamelCase : int ) -> Tuple:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : int , **lowerCamelCase : Dict ) -> Optional[Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Optional[Any] = ["""torch"""]
    def __init__( self : str , *lowerCamelCase : int , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Optional[int] , *lowerCamelCase : Optional[int] , **lowerCamelCase : str ) -> List[Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : int , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> int:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Optional[Any] = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Dict ) -> Any:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : int , *lowerCamelCase : Any , **lowerCamelCase : Any ) -> Any:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Dict = ["""torch"""]
    def __init__( self : Dict , *lowerCamelCase : str , **lowerCamelCase : List[Any] ) -> List[Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[str] , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ) -> Dict:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : str , **lowerCamelCase : Tuple ) -> Dict:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Optional[Any] = ["""torch"""]
    def __init__( self : int , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Dict ) -> Dict:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Dict ) -> List[Any]:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : Any , **lowerCamelCase : str ) -> List[Any]:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : Union[str, Any] = ["""torch"""]
    def __init__( self : Tuple , *lowerCamelCase : Optional[int] , **lowerCamelCase : str ) -> Union[str, Any]:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Optional[Any] ) -> str:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : List[Any] , *lowerCamelCase : List[str] , **lowerCamelCase : Tuple ) -> Any:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
    """Placeholder for a torch-backed object; every entry point defers to requires_backends."""
    __UpperCAmelCase : str = ["""torch"""]
    def __init__( self : Optional[Any] , *lowerCamelCase : str , **lowerCamelCase : Tuple ) -> Dict:
        requires_backends(self , ["torch"] )
    @classmethod
    def __snake_case ( cls : str , *lowerCamelCase : Tuple , **lowerCamelCase : Tuple ) -> Dict:
        requires_backends(cls , ["torch"] )
    @classmethod
    def __snake_case ( cls : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Any:
        requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : int ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Optional[int] ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = ["""torch"""]
def __init__( self : str , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Tuple ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : str , **lowerCamelCase : Any ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Any , **lowerCamelCase : List[str] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = ["""torch"""]
def __init__( self : Any , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Any , **lowerCamelCase : int ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : int , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Tuple ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : int = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : Optional[int] ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ) -> Dict:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["""torch"""]
def __init__( self : Tuple , *lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : Any , **lowerCamelCase : Any ) -> Any:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""torch"""]
def __init__( self : Optional[int] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : Tuple , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Any ) -> List[str]:
requires_backends(cls , ["torch"] )
class a (metaclass=lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["""torch"""]
def __init__( self : List[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : int , **lowerCamelCase : List[Any] ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : str , **lowerCamelCase : Dict ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class a(metaclass=lowerCAmelCase__):
    """Placeholder object: any use immediately reports the missing "torch" backend.

    Fixed: duplicate *lowerCamelCase/**lowerCamelCase argument names (SyntaxError).
    """

    __UpperCAmelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):  # shadowed by the next definition
        requires_backends(cls, ["torch"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
| 123
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase(unittest.TestCase):
    """Tests for the ``text2text-generation`` pipeline (encoder-decoder models).

    Fixed: duplicate ``__a`` parameters (SyntaxError), variables assigned to
    throwaway names but read as ``outputs``, pad-token assignments collapsed
    into dead locals, and duplicate attribute/method names that shadowed each
    other. NOTE(review): restored attribute and method names follow the
    pipeline-test mixin convention — confirm against the test framework.
    """

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        # Restore padding so batched generation works (the originals of these
        # two lines assigned the values to dead locals instead).
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 57
| 0
|
'''simple docstring'''
def __snake_case(point_a, point_b):
    """Return the Manhattan (L1) distance between two n-dimensional points.

    Fixed: both parameters were named ``UpperCAmelCase_`` (SyntaxError).

    Raises:
        ValueError: if the two points have different dimensionality.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def __snake_case(point):
    """Validate that *point* is a non-empty list of numbers.

    Fixed: the body referenced ``_UpperCamelCase`` while the parameter was
    named ``UpperCAmelCase_`` (NameError at runtime).

    Raises:
        ValueError: if *point* is empty/missing.
        TypeError: if *point* is not a list, or contains non-numbers.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


# Name used by the distance functions' call sites in this module.
_validate_point = __snake_case
def __snake_case(point_a, point_b):
    """One-liner Manhattan (L1) distance between two n-dimensional points.

    Fixed: both parameters were named ``UpperCAmelCase_`` (SyntaxError).

    Raises:
        ValueError: if the two points have different dimensionality.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 55
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase(pl.LightningModule):
    """Minimal LightningModule holding a Longformer encoder plus a 2-way QA head.

    Fixed: ``__init__`` assigned the model/head to throwaway locals instead of
    instance attributes, and referenced ``model`` while the parameter had a
    different name (NameError). Attribute names are grounded by the conversion
    routine below (``lightning_model.model`` / ``.qa_outputs``).
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement comfort of (inference) forward pass — intentionally unused here.
    def forward(self):  # NOTE(review): original method name was obfuscated
        pass


# Name used by the conversion function below.
LightningModel = _UpperCamelCase
def _lowerCamelCase(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    """Convert a PyTorch-Lightning Longformer QA checkpoint to a HF model dir.

    Fixed: all three parameters were named ``_UpperCamelCase`` (SyntaxError)
    and the final message referenced an undefined ``pytorch_dump_folder_path``.
    """
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


# Name used by the __main__ entry point below.
convert_longformer_qa_checkpoint_to_pytorch = _lowerCamelCase
if __name__ == "__main__":
    # Fixed: the parser/args objects were assigned to throwaway names while the
    # code below read `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 0
|
def lowercase(hubble_constant, radiation_density, matter_density, dark_energy, redshift) -> float:
    """Return the Hubble parameter H(z) for a curvature-corrected LCDM model.

    Fixed: all five parameters were named ``UpperCamelCase_`` (SyntaxError);
    real names recovered from the body's own references.

    Raises:
        ValueError: if any parameter is negative (note: hubble_constant is not
            range-checked, matching the original behavior), or if any relative
            density exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        # Curvature closes the density budget to exactly 1.
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


# Descriptive name used by the demo in __main__.
hubble_parameter = lowercase
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    # Fixed: the value was assigned to a throwaway name while the call below
    # read `matter_density` (NameError).
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 343
|
"""simple docstring"""
def _lowerCamelCase(input_list):
    """Sort *input_list* in place with odd-even (brick) sort and return it.

    Fixed: the body read ``input_list`` and ``is_sorted`` although both were
    assigned to throwaway names (NameError / infinite loop).
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


# Name used by the __main__ driver below.
odd_even_sort = _lowerCamelCase
if __name__ == "__main__":
    print("Enter list to be sorted")
    # Fixed: both values were assigned to `A` while the code below read
    # `input_list` and `sorted_list` (NameError).
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Fixed: the logger was bound to `lowerCamelCase` while the conversion code
# below logs through `logger` (NameError).
logger = logging.get_logger(__name__)
def a_(key, offset, original_name, new_name):
    """Rewrite a PoolFormer state-dict key, shifting its block number by *offset*.

    Fixed: all four parameters were named ``SCREAMING_SNAKE_CASE__``
    (SyntaxError); real names recovered from the body and call sites.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # The two path components before `to_find` are <block>.<layer>.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


# Name used by rename_keys below.
replace_key_with_offset = a_
def a_(state_dict):
    """Map original PoolFormer state-dict keys onto HF ``PoolFormerModel`` keys.

    Fixed: the body read ``state_dict``, ``patch_emb_offset`` and
    ``total_embed_found`` although they were assigned to throwaway names, and
    every rewritten key was dropped instead of stored in the result dict.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


# Name used by the checkpoint converter below.
rename_keys = a_
def a_():
    """Download and return the standard COCO cats test image.

    Fixed: the URL and ``stream`` flag were replaced by an undefined name
    (``requests.get(_UpperCamelCase, stream=_UpperCamelCase)``).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# Name used by the checkpoint converter below.
prepare_img = a_
@torch.no_grad()
def a_(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to a HF model directory.

    Fixed: all three parameters were named ``SCREAMING_SNAKE_CASE__``
    (SyntaxError) and most intermediate values were assigned to throwaway
    names while later lines read the real identifiers. NOTE(review): config
    attribute names (depths/hidden_sizes/mlp_ratio/layer_scale_init_value)
    are reconstructed from the PoolFormer config convention — confirm.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Name used by the __main__ entry point below.
convert_poolformer_checkpoint = a_
if __name__ == "__main__":
    # Fixed: parser/args were assigned to `lowerCamelCase` while the code
    # below read `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 199
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase(lowerCAmelCase__):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer.

    Fixed: three class attributes and three methods all shared one obfuscated
    name (so only the last of each survived), ``__call__`` dropped its results
    into throwaway locals and returned an undefined ``encoding``, and the
    deprecation warning lost its ``FutureWarning`` category.
    NOTE(review): attribute and method names follow the ProcessorMixin
    convention — confirm against the base class.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated argument if the new one was not given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or preprocess *images*; return a BatchEncoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # De-duplicate while preserving order.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
| 0
|
'''simple docstring'''
import operator as op
def a__(post_fix):
    """Evaluate a tokenized postfix expression, printing an evaluation table.

    Fixed: the body read ``post_fix``, ``stack``, ``div``, ``a`` and ``b``
    although all were assigned to throwaway names (NameError).
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


# Name used by the __main__ driver below.
solve = a__
if __name__ == "__main__":
if __name__ == "__main__":
    # Fixed: input was assigned to a throwaway name while `solve` was called
    # with the undefined `Postfix`; also guard interactive code behind __main__.
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 181
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase:
    """Empty class used solely to exercise the ``require_onnxruntime`` decorator."""
| 57
| 0
|
"""simple docstring"""
import os
import string
import sys
# Bit flag OR-ed into arrow-key codes so they don't collide with ASCII codes.
# Fixed: every name here was collapsed to `a`, leaving ARROW_KEY_FLAG, KEYMAP,
# WIN_CH_BUFFER and WIN_KEYMAP (all read below) undefined, and the
# "arrow_begin"/"arrow_end" KEYMAP entries were never stored.
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Pending translated keystrokes (Windows console only).
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def _lowercase() -> str:
    """Read one raw character from the keyboard (Windows or POSIX).

    Fixed: most locals (``encoding``, ``ch``, the two-byte prefix) were
    assigned to throwaway names while later lines read the real identifiers.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal mode.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


# Name used by the character decoder below.
get_raw_chars = _lowercase
def _lowercase():
    """Read one keystroke and translate escape sequences to KEYMAP codes.

    Fixed: the body read ``char`` although the keystroke was assigned to a
    throwaway name (NameError).
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]


# Descriptive public name for this reader.
get_character = _lowercase
| 132
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase:
    """Array-backed min-heap that also tracks each vertex's slot in the heap.

    Fixed: all six methods shared one obfuscated name (only the last
    survived), ``bottom_to_top`` declared four identically-named parameters
    (SyntaxError), and ``__init__``/``set_position`` dropped their values
    into throwaway locals. Method names are grounded by the internal
    ``self.get_position``/``self.set_position``/``self.top_to_bottom`` calls
    and the Prim's-algorithm driver below.
    """

    def __init__(self):
        # node_position[vertex] -> current index of that vertex in the heap.
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the value at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key upwards from `index`."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


# Name used by the Prim's-algorithm driver below.
Heap = _UpperCamelCase
def _lowerCamelCase(adjacency_list):
    """Prim's minimum-spanning-tree: return tree edges as (parent, vertex).

    Fixed: every local (``heap``, ``visited``, ``nbr_tv``, ``distance_tv``,
    ``positions``, ``tree_edges``, ``vertex``) was assigned to a throwaway
    name while later lines read the real identifiers.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial
    # tree formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# Name used by the __main__ driver below.
prisms_algorithm = _lowerCamelCase
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Fixed: all three values were assigned to `A` while the loop below read
    # `edges_number`, `adjacency_list` and `edge` (NameError).
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 0
|
"""simple docstring"""
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowerCAmelCase__(subparsers=None):
    """Build the argument parser for the ``accelerate env`` command.

    Fixed: the parser was assigned to a throwaway name while the code below
    read ``parser``, and the ``func`` default lost its callback.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


# Name used by `main` below.
env_command_parser = lowerCAmelCase__
def lowerCAmelCase__ ( UpperCamelCase__ ):
    """Collect and print environment info (versions, platform, accelerator
    availability, RAM, accelerate config) for bug reports; returns the dict.

    NOTE(review): most assignment targets were mangled to `_a`, while later
    lines read `args`, `pt_version`, `pt_cuda_available`, `info` and
    `accelerate_config` — those names are never bound here, so the function
    raises NameError as written; confirm against the original source.
    """
    _a : int = torch.__version__
    _a : Union[str, Any] = torch.cuda.is_available()
    _a : int = is_xpu_available()
    _a : Any = is_npu_available()
    # Placeholder shown when no accelerate config file is found.
    _a : Any = """Not found"""
    # Get the default from the config file.
    # NOTE(review): the second operand presumably checked `default_config_file`
    # rather than the argument namespace — TODO confirm.
    if args.config_file is not None or os.path.isfile(_UpperCamelCase ):
        _a : Dict = load_config_from_file(args.config_file ).to_dict()
    _a : Optional[int] = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F"""{pt_version} ({pt_cuda_available})""",
        """PyTorch XPU available""": str(_UpperCamelCase ),
        """PyTorch NPU available""": str(_UpperCamelCase ),
        """System RAM""": F"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        _a : Optional[int] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    _a : List[str] = (
        """\n""".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(_UpperCamelCase , _UpperCamelCase )
        else F"""\t{accelerate_config}"""
    )
    print(_UpperCamelCase )
    _a : str = accelerate_config
    return info
def lowerCAmelCase__ ( ):
    """CLI entry point: parse args and run the env command; returns exit code 0.

    NOTE(review): `env_command_parser`, `parser`, `env_command` and `main`
    are the original (pre-mangling) names and are unbound in this file as
    written — confirm against the original source.
    """
    _a : List[str] = env_command_parser()
    _a : str = parser.parse_args()
    env_command(_UpperCamelCase )
    return 0
if __name__ == "__main__":
    raise SystemExit(main())
| 294
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
A : Tuple = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
A : Optional[Any] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def _lowerCamelCase ( _UpperCamelCase ):
    """Load a torch checkpoint from the given path onto CPU memory.

    Fixes: the original stored the loaded object in a throwaway local and
    then returned the undefined name ``sd`` (NameError); return the loaded
    object directly.
    """
    return torch.load(_UpperCamelCase , map_location="cpu" )
def _lowerCamelCase ( d , config , rename_keys_prefix=None ):
    """Rewrite an original VisualBERT state dict to HF parameter naming.

    Drops detector (visual backbone) weights, applies the (old, new) prefix
    renames, injects a fresh ``position_ids`` buffer sized from *config*,
    and aliases the decoder bias for old checkpoints.

    Fixes: the original signature reused one mangled name for all three
    parameters (a SyntaxError) and discarded the renamed keys and the
    position-ids buffer into throwaway locals.
    """
    if rename_keys_prefix is None:
        # Default rename table (mirrors the module-level constant).
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    for key in d:
        if "detector" in key:
            # Detector weights are not part of the HF VisualBERT model.
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Convert an original VisualBERT ``.th`` checkpoint into an HF model
    directory: pick the config/model class from the checkpoint filename,
    remap the state dict, and save the result.

    NOTE(review): both parameters carry the same mangled name (a SyntaxError
    in Python), and later lines read `checkpoint_path`, `config_params`,
    `model_type` and `model`, which the mangled assignments never bind —
    confirm against the original source before relying on this function.
    """
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    # Checkpoint names encode both the task and the visual embedding size.
    if "pre" in checkpoint_path:
        __lowerCAmelCase = "pretraining"
        if "vcr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
    else:
        if "vcr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 512}
            __lowerCAmelCase = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
            __lowerCAmelCase = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129}
            __lowerCAmelCase = "vqa"
        elif "nlvr" in checkpoint_path:
            __lowerCAmelCase = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            __lowerCAmelCase = "nlvr"
    __lowerCAmelCase = VisualBertConfig(**_UpperCamelCase )
    # Load State Dict
    __lowerCAmelCase = load_state_dict(_UpperCamelCase )
    __lowerCAmelCase = get_new_dict(_UpperCamelCase , _UpperCamelCase )
    # Select the task-specific head for the converted weights.
    if model_type == "pretraining":
        __lowerCAmelCase = VisualBertForPreTraining(_UpperCamelCase )
    elif model_type == "vqa":
        __lowerCAmelCase = VisualBertForQuestionAnswering(_UpperCamelCase )
    elif model_type == "nlvr":
        __lowerCAmelCase = VisualBertForVisualReasoning(_UpperCamelCase )
    elif model_type == "multichoice":
        __lowerCAmelCase = VisualBertForMultipleChoice(_UpperCamelCase )
    model.load_state_dict(_UpperCamelCase )
    # Save Checkpoints
    Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
    model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # CLI: convert_visual_bert_checkpoint <orig .th path> <output dir>.
    # NOTE(review): the parser is bound to the mangled name `A` while the next
    # lines read `parser`/`args` — unbound as written; confirm original names.
    A : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    A : Optional[int] = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 0
|
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase__ :
    """Boyer-Moore substring search using the bad-character heuristic.

    Fixes: the original defined ``__init__`` with two parameters sharing one
    name (a SyntaxError) and all three methods under the single name ``A_``
    (each shadowing the previous), while the method bodies and the
    module-level caller invoke ``match_in_pattern`` / ``mismatch_in_text`` /
    ``bad_character_heuristic``; restore those method names.
    """

    def __init__( self , text , pattern ):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )

    def match_in_pattern( self , char ):
        """Return the rightmost index of *char* in the pattern, or -1."""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text( self , current_pos ):
        """Return the text index of the rightmost mismatch when the pattern
        is aligned at *current_pos*, or -1 on a full match."""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic( self ):
        """Return every alignment position where the pattern matches the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                # Shift computed but unused by the for-loop (kept from original).
                i = mismatch_index - match_index
        return positions
# Demo: search "AB" inside "ABAABA" and print matching positions.
# NOTE(review): all four assignments target the mangled name `lowercase__`
# (each overwriting the last), while the code below reads `text`, `pattern`,
# `bms` and `positions`, and `BoyerMooreSearch` is the class's original
# (pre-mangling) name — unbound as written; confirm against the original.
lowercase__ = "ABAABA"
lowercase__ = "AB"
lowercase__ = BoyerMooreSearch(text, pattern)
lowercase__ = bms.bad_character_heuristic()
if len(positions) == 0:
    print("""No match found""")
else:
    print("""Pattern found in following positions: """)
    print(positions)
| 96
|
"""simple docstring"""
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Queue-capacity exception (presumably ``OverFlowError`` before name
    mangling; the base `lowerCAmelCase__` is not defined in this file —
    TODO confirm against the original)."""
    pass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Empty-queue exception (presumably ``UnderFlowError`` before name
    mangling — TODO confirm against the original)."""
    pass
class _UpperCamelCase :
    """Fixed-level priority queue with three FIFO levels (0 = highest).

    Fixes: the original defined both methods under the single name
    ``snake_case`` — ``enqueue`` with two parameters sharing one mangled
    name (a SyntaxError) — while the bodies and the module-level demo use
    ``self.queues``, ``enqueue`` and ``dequeue``; restore those names.
    """

    def __init__( self ):
        # One FIFO list per priority level 0..2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue( self , priority , data ):
        """Append *data* to the level given by *priority* (0, 1 or 2)."""
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverflowError("Maximum queue size is 100" )
            self.queues[priority].append(data )
        except IndexError:
            # An out-of-range priority indexes past the three levels.
            raise ValueError("Valid priorities are 0, 1, and 2" )

    def dequeue( self ):
        """Pop from the highest-priority non-empty level (FIFO within level)."""
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        # NOTE(review): UnderFlowError is expected to be defined elsewhere
        # in this module (its class name was mangled).
        raise UnderFlowError("All queues are empty" )

    def __str__( self ):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class _UpperCamelCase :
    """Priority queue where the smallest stored element has highest priority.

    Fixes: the original defined both methods under the single name
    ``snake_case`` (the second definition shadowing the first), while the
    module-level demo calls ``enqueue`` and ``dequeue``; restore those names.
    """

    def __init__( self ):
        self.queue = []

    def enqueue( self , data ):
        """Append *data*; capped at 100 elements."""
        if len(self.queue ) == 1_00:
            # NOTE(review): OverFlowError is expected to be defined elsewhere
            # in this module (its class name was mangled).
            raise OverFlowError("Maximum queue size is 100" )
        self.queue.append(data )

    def dequeue( self ):
        """Remove and return the smallest element."""
        if not self.queue:
            # NOTE(review): UnderFlowError expected from elsewhere in module.
            raise UnderFlowError("The queue is empty" )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data

    def __str__( self ):
        return str(self.queue )
def _lowerCamelCase ( ):
    """Demo of the fixed-level priority queue: enqueue mixed priorities,
    then interleave printing the queue state and dequeued items.

    NOTE(review): the queue is bound to the mangled throwaway name while the
    calls read `fpq`, and `FixedPriorityQueue` is the class's original
    (pre-mangling) name — unbound as written; confirm against the original.
    `print(_UpperCamelCase )` presumably printed the queue object.
    """
    __lowerCAmelCase = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def _lowerCamelCase ( ):
    """Demo of the element (min-value) priority queue.

    NOTE(review): same mangling issue as above — the queue is bound to a
    throwaway name while the calls read `epq`; `ElementPriorityQueue` is the
    class's original name. Also note this definition shadows the previous
    one since both carry the same mangled function name.
    """
    __lowerCAmelCase = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): these call the demos' original (pre-mangling) names,
    # which are unbound as written — confirm against the original.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCAmelCase_ ( main_process_only = True,*args,**kwargs ):
    """Wrap ``tqdm.auto.tqdm`` so that by default only the main local
    process renders a progress bar; other ranks get a disabled bar.

    Fixes: the original signature repeated one mangled parameter name three
    times (a SyntaxError) while the body already read ``main_process_only``.
    The disable condition is restored to ``local_process_index != 0``
    (disable on non-main ranks); the mangled ``== 0`` would have hidden the
    bar on the main process only.
    """
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args,**kwargs,disable=disable )
| 26
|
"""simple docstring"""
def _lowerCamelCase ( number , iterations ):
    """Play FizzBuzz from *number* through *iterations* (inclusive) and
    return the space-separated results as one string.

    Raises ValueError for non-int arguments, a starting number < 1, or
    fewer than one iteration.

    Fixes: the original declared both parameters under one mangled name
    (a SyntaxError), which also made both isinstance checks meaningless.
    """
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Neither divisible by 3 nor 5: emit the number itself.
        if 0 not in (number % 3, number % 5):
            out += str(number )
        number += 1
        out += " "
    return out
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the CLIPSeg model family: configs/processor are
# always importable; torch-dependent model classes are registered only when
# torch is available.
_UpperCAmelCase = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch, the modeling symbols are simply not registered.
    pass
else:
    _UpperCAmelCase = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
    # Static-analysis-only imports so type checkers see the real symbols.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys
    # NOTE(review): the mangled name `_UpperCAmelCase` is reused for both the
    # import structure and the model list above; _LazyModule receives
    # `_import_structure`, which is unbound here — confirm original names.
    _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 140
|
"""simple docstring"""
def _lowerCamelCase ( ):
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112...

    Performance fix: stop concatenating as soon as 10**6 digits are
    available (~186k numbers) instead of appending 10**6 whole numbers
    (~5.9M digits) as the original did; the first 10**6 digits — and hence
    the returned product — are identical.
    """
    digits = []
    total_len = 0
    i = 1
    while total_len < 10**6:
        s = str(i )
        digits.append(s )
        total_len += len(s )
        i += 1
    constant = "".join(digits )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[9_9999] )
        * int(constant[99_9999] )
    )
if __name__ == "__main__":
    # The original printed `solution()`, a name lost to mangling.
    print(_lowerCamelCase())
| 57
| 0
|
from ....utils import logging
__a : Optional[int] = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Configuration wrapper for a multimodal BERT-style model: adopts every
    attribute of the wrapped text *config* and records the hidden size of
    the non-text (modal) encoder.

    Fixes: the original ``__init__`` repeated one mangled parameter name
    three times (a SyntaxError) and bound the values to throwaway locals
    instead of instance attributes; restore the attribute assignments the
    original transformers implementation performs.
    """

    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        # Adopt the wrapped config's attributes wholesale.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 210
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: the small Lena test image and its grayscale version.
# NOTE(review): both are bound to the mangled name `A` while the tests below
# read `img`/`gray` — unbound as written; same for the `_UpperCamelCase`
# arguments and the `negative_img`/`resp`/`canny_img` result names inside
# each test. Confirm the original bindings before relying on these tests.
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
    """Smoke test: convert_to_negative yields a non-all-zero array."""
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()
def _lowerCamelCase ( ):
    """Smoke test: change_contrast returns a PIL image of the expected size."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
    """Smoke test: a 9x9 Gaussian kernel has no zero entries."""
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def _lowerCamelCase ( ):
    """Smoke test: Canny edge detection produces at least one edge pixel."""
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()
def _lowerCamelCase ( ):
    """Smoke test: Gaussian filter output has no zero entries."""
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
    """Smoke test: convolution with a Laplacian-style kernel is non-zero.

    NOTE(review): results are bound to mangled throwaway names while the
    asserts read `res`/`grad`/`theta`/`sepia` — unbound as written.
    """
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()
def _lowerCamelCase ( ):
    """Smoke test: 3x3 median filter output is non-zero."""
    assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
    """Smoke test: Sobel filter returns non-zero gradient and angle maps."""
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()
def _lowerCamelCase ( ):
    """Smoke test: sepia conversion has no zero entries."""
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    """Smoke test: Burkes dithering produces a non-zero output image."""
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    """Smoke test: nearest-neighbour resize to 400x200 is non-zero.

    NOTE(review): the objects are bound to mangled throwaway names while the
    following lines read `burkes`/`nn` — unbound as written.
    """
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def _lowerCamelCase ( ):
    """Exercise the local-binary-pattern helpers on the full Lena image."""
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
| 0
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : Dict = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
_snake_case : Optional[Any] = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Build an EfficientNetConfig for the given variant ("b0".."b7"),
    including the ImageNet-1k id<->label maps from the HF hub.

    NOTE(review): the parameter is mangled while the body reads `model_name`,
    and the computed values are bound to throwaway `__snake_case` names
    instead of config attributes (`config.hidden_dim`, `config.idalabel`,
    ...) — the returned config is effectively unmodified as written; confirm
    against the original conversion script.
    """
    __snake_case : Optional[Any] = EfficientNetConfig()
    __snake_case : Union[str, Any] = CONFIG_MAP[model_name]["hidden_dim"]
    __snake_case : List[str] = CONFIG_MAP[model_name]["width_coef"]
    __snake_case : str = CONFIG_MAP[model_name]["depth_coef"]
    __snake_case : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
    __snake_case : List[str] = CONFIG_MAP[model_name]["dropout_rate"]
    __snake_case : Union[str, Any] = CONFIG_MAP[model_name]["dw_padding"]
    __snake_case : List[Any] = "huggingface/label-files"
    __snake_case : Optional[Any] = "imagenet-1k-id2label.json"
    __snake_case : Tuple = 1_0_0_0
    __snake_case : int = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
    __snake_case : Dict = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
    __snake_case : List[str] = idalabel
    __snake_case : int = {v: k for k, v in idalabel.items()}
    return config
def lowerCAmelCase_ ( ):
    """Download the standard COCO validation image used for smoke tests.

    Fixes: the original bound the URL to a throwaway local and then read the
    undefined mangled name `_UpperCamelCase` in the request (NameError).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so the raw byte stream can be handed to PIL directly.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Build the EfficientNet image processor sized for the given variant.

    NOTE(review): the parameter is mangled while the body reads `model_name`,
    `size` and `preprocessor`, and `do_center_crop` receives the undefined
    `_UpperCamelCase` — unbound as written; confirm the original bindings.
    """
    __snake_case : Tuple = CONFIG_MAP[model_name]["image_size"]
    __snake_case : List[str] = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_UpperCamelCase , )
    return preprocessor
def lowerCAmelCase_ ( original_param_names ):
    """Build the TF->HF parameter-name mapping for an EfficientNet checkpoint.

    Returns a dict mapping each TF variable name found in
    *original_param_names* to its HF state-dict key (backbone keys get the
    ``efficientnet.`` prefix; the classification head maps separately).

    Fixes: the original appended to ``rename_keys`` without ever binding it
    and discarded the final dict entries into mangled throwaway names, so no
    mapping was ever produced.
    """
    # TF block tags ("1a", "2b", ...) in sorted order -> sequential HF index.
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
        rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
        rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
        rename_keys.append(
            (F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
        rename_keys.append(
            (F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
        rename_keys.append(
            (F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
        rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
        rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
        rename_keys.append(
            (F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
        rename_keys.append(
            (F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
        rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
        rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
        rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
        rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
        rename_keys.append(
            (F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
        rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
        rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
        rename_keys.append(
            (F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
        rename_keys.append(
            (F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # Classification head lives outside the `efficientnet.` backbone prefix.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def lowerCAmelCase_ ( hf_params , tf_params , key_mapping ):
    """Copy TF checkpoint tensors into the HF state dict in place, fixing
    the layout differences between TF and PyTorch weight tensors.

    Fixes: the original declared all three parameters under one mangled
    name (a SyntaxError) and bound ``hf_key``/``new_hf_value`` to throwaway
    locals before reading them (NameError).
    """
    for key, value in tf_params.items():
        # Keys containing "normalization" are intentionally skipped.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernel: TF HWIO layout -> PyTorch OIHW.
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            # Depthwise kernels use a different axis order than regular convs.
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # Dense kernel: transpose (in, out) -> (out, in).
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
    """Convert a Keras EfficientNet to an HF model: extract TF weights,
    remap them, check both models agree on a sample image (atol 1e-3), and
    optionally save and/or push the result.

    NOTE(review): all four parameters share one mangled name (a SyntaxError
    in Python) while the body reads `model_name`, `save_model`,
    `pytorch_dump_folder_path` and `push_to_hub`; most intermediates are
    likewise bound to throwaway `__snake_case` names but read under their
    original names — confirm against the original conversion script.
    """
    __snake_case : Any = model_classes[model_name](
        include_top=_UpperCamelCase , weights="imagenet" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="softmax" , )
    __snake_case : Tuple = original_model.trainable_variables
    __snake_case : List[str] = original_model.non_trainable_variables
    __snake_case : int = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        __snake_case : int = param.numpy()
    __snake_case : str = list(tf_params.keys() )
    # Load HuggingFace model
    __snake_case : List[Any] = get_efficientnet_config(_UpperCamelCase )
    __snake_case : Any = EfficientNetForImageClassification(_UpperCamelCase ).eval()
    __snake_case : Any = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    __snake_case : List[Any] = rename_keys(_UpperCamelCase )
    replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    # Initialize preprocessor and preprocess input image
    __snake_case : Any = convert_image_processor(_UpperCamelCase )
    __snake_case : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        __snake_case : Tuple = hf_model(**_UpperCamelCase )
    __snake_case : Any = outputs.logits.detach().numpy()
    # Original model inference
    __snake_case : Tuple = False
    __snake_case : Tuple = CONFIG_MAP[model_name]["image_size"]
    __snake_case : List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    __snake_case : Optional[Any] = image.img_to_array(_UpperCamelCase )
    __snake_case : Dict = np.expand_dims(_UpperCamelCase , axis=0 )
    __snake_case : List[str] = original_model.predict(_UpperCamelCase )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(_UpperCamelCase ):
            os.mkdir(_UpperCamelCase )
        # Save converted model and image processor
        hf_model.save_pretrained(_UpperCamelCase )
        preprocessor.save_pretrained(_UpperCamelCase )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'Pushing converted {model_name} to the hub...' )
        __snake_case : Union[str, Any] = F'efficientnet-{model_name}'
        preprocessor.push_to_hub(_UpperCamelCase )
        hf_model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
    # CLI for the EfficientNet conversion.
    # NOTE(review): the parser/args are bound to the mangled name
    # `_snake_case` while the following lines read `parser`/`args` —
    # unbound as written; confirm the original bindings.
    _snake_case : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    _snake_case : Any = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 123
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the AltCLIP model family: configs/processor are
# always importable; torch-dependent model classes only when torch is present.
A : Optional[int] = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch, the modeling symbols are simply not registered.
    pass
else:
    A : Tuple = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
    # Static-analysis-only imports so type checkers see the real symbols.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is the original name of the dict
    # mangled to `A` above — unbound as written; confirm the original.
    A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
from __future__ import annotations
def __snake_case ( UpperCAmelCase_ ):
    """Solve a square 0/1 maze: print the solution grid (or a failure
    message) and return whether a path from (0, 0) to the bottom-right
    corner exists.

    Fixes: the original read the undefined mangled name `_UpperCamelCase`
    instead of its parameter and bound every intermediate to a throwaway
    local before reading it under its original name.
    """
    size = len(UpperCAmelCase_ )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    # NOTE(review): `run_maze` is the DFS helper defined below in this
    # module (its name was mangled) — confirm the reference.
    solved = run_maze(UpperCAmelCase_ , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def __snake_case ( maze , i , j , solutions ):
    """Depth-first search step for the maze solver: try to extend the path
    through cell (i, j); marks visited cells in *solutions* and backtracks
    on dead ends. Returns True when the bottom-right corner is reached.

    Fixes: the original declared all four parameters under one mangled name
    (a SyntaxError), and its recursive calls targeted ``run_maze`` — a name
    this mangled definition no longer carries — so recursion self-references
    this function instead.
    """
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                __snake_case(maze , i + 1 , j , solutions )
                or __snake_case(maze , i , j + 1 , solutions )
                or __snake_case(maze , i - 1 , j , solutions )
                or __snake_case(maze , i , j - 1 , solutions )
            ):
                return True
            # Dead end: unmark and backtrack.
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 55
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Extended seq2seq training arguments (label smoothing, sortish
    sampling, generation-based metrics, adafactor, layer-drop/dropout
    overrides and LR-scheduler choice).

    NOTE(review): every field below carries the same mangled name
    `__UpperCAmelCase`, so each annotation overwrites the previous and the
    dataclass ends up with a single field; the original (transformers
    legacy seq2seq) field names — label_smoothing, sortish_sampler,
    predict_with_generate, adafactor, encoder_layerdrop, decoder_layerdrop,
    dropout, attention_dropout, lr_scheduler — need restoring. The mangled
    `default=lowerCAmelCase__` values also shadow the base-class name;
    originals are False/None. Confirm before fixing.
    """
    __UpperCAmelCase : Optional[float] =field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to SortishSamler or not."""} )
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """whether to use adafactor"""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[str] =field(
        default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 57
| 0
|
def lowercase( UpperCamelCase_ ) -> str:
    """Return *UpperCamelCase_* with duplicate alphabetic characters removed.

    Spaces are always kept; other non-alphabetic characters are dropped.
    First-occurrence order is preserved.

    Bug fix: the original body read the undefined names ``key`` and
    ``key_no_dups`` (every obfuscated assignment bound ``UpperCamelCase``
    instead), so any call raised NameError.
    """
    key_no_dups = ""
    for ch in UpperCamelCase_:
        # Keep spaces unconditionally; keep letters only on first occurrence.
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


# Canonical upstream name (the cipher-map builder expects this helper).
remove_duplicates = lowercase
def lowercase( UpperCamelCase_ ) -> dict:
    """Build a mono-alphabetic substitution map from keyword *UpperCamelCase_*.

    Maps each uppercase plaintext letter A-Z to a cipher letter: the keyword's
    de-duplicated letters come first, then the remaining alphabet letters.

    Bug fix: the original bound every intermediate to one obfuscated name
    while reading distinct names (``key``, ``offset``, ``alphabet``, ...), so
    it raised NameError; it also called a module-level ``remove_duplicates``
    that this file defines under another name — the helper is inlined here so
    the function is self-contained.
    """

    def _remove_duplicates(key: str) -> str:
        # Keep spaces and the first occurrence of each letter only.
        key_no_dups = ""
        for ch in key:
            if ch == " " or (ch not in key_no_dups and ch.isalpha()):
                key_no_dups += ch
        return key_no_dups

    alphabet = [chr(i + 65) for i in range(26)]
    # The de-duplicated, upper-cased keyword forms the head of the cipher alphabet.
    key = _remove_duplicates(UpperCamelCase_.upper())
    offset = len(key)
    # First fill the cipher with the keyword characters.
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map the remaining plaintext letters to the unused alphabet letters.
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we do not map to letters already consumed by the keyword.
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


# Canonical upstream name used by the interactive driver.
create_cipher_map = lowercase
def lowercase( message , cipher_map ) -> str:
    """Encipher *message* through *cipher_map* (plain -> cipher letters).

    Characters absent from the map (digits, punctuation, spaces) pass through
    unchanged. Bug fix: the original declared the same parameter name twice —
    a SyntaxError — so no caller could have used keyword arguments; the
    parameters are therefore given descriptive names.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


# Canonical upstream name used by the interactive driver.
encipher = lowercase
def lowercase( message , cipher_map ) -> str:
    """Decipher *message* by inverting *cipher_map* (cipher -> plain letters).

    Unknown characters pass through unchanged. Bug fix: the original declared
    the same parameter name twice (a SyntaxError) and read undefined names.
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


# Canonical upstream name used by the interactive driver.
decipher = lowercase
def lowercase( ) -> None:
    """Interactive driver: prompt for message/keyword and (en|de)cipher it.

    Bug fix: the original read undefined locals (``key``, ``option``,
    ``func``, ``cipher_map``). It still relies on the module-level names
    ``encipher``/``decipher``/``create_cipher_map``, as the original did.
    """
    message = input("""Enter message to encode or decode: """ ).strip()
    key = input("""Enter keyword: """ ).strip()
    option = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
    try:
        func = {"""e""": encipher, """d""": decipher}[option]
    except KeyError:
        raise KeyError("""invalid input option""" )
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


# Canonical upstream name used by the __main__ guard below.
main = lowercase
# Run the doctests, then the interactive keyword-cipher demo.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 343
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
# Consistency fix: the obfuscation bound every table below to `A` (each
# assignment shadowing the previous), while the release helpers read them as
# PATH_TO_EXAMPLES / REPLACE_PATTERNS / REPLACE_FILES / README_FILE.
# Re-expose each value under the canonical name immediately after binding it.
PATH_TO_EXAMPLES = A
# Regex + replacement-template pairs, keyed by the kind of file to patch.
A : Optional[Any] = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_PATTERNS = A
# Files that carry the canonical version string.
A : Optional[int] = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
REPLACE_FILES = A
A : List[Any] = "README.md"
README_FILE = A
def _lowerCamelCase ( fname , version , pattern ):
    """Rewrite the version string inside *fname* using the regex registered
    for *pattern* in the module-level ``REPLACE_PATTERNS`` table.

    Bug fix: the original declared the same parameter name three times
    (a SyntaxError) and unpacked both halves of the pattern tuple into a
    single reused obfuscated name.
    """
    with open(fname , "r" , encoding="utf-8" , newline="\n" ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the concrete version into the replacement template first.
    replace = replace.replace("VERSION" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.write(code )


# Canonical upstream name used by the other release helpers.
update_version_in_file = _lowerCamelCase
def _lowerCamelCase ( _UpperCamelCase ):
    """Bump the pinned version (*_UpperCamelCase*) in every maintained example
    script under the module-level ``PATH_TO_EXAMPLES`` tree.

    Bug fixes: the obfuscated original walked the version string instead of
    the examples directory and joined the version with itself instead of
    ``folder``/``fname``.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(folder , fname ) , _UpperCamelCase , pattern="examples" )


# Canonical upstream name used by global_version_update.
update_version_in_examples = _lowerCamelCase
def _lowerCamelCase ( version , patch=False ):
    """Update *version* everywhere it is pinned (init, setup and — unless this
    is a patch release — the example scripts).

    Bug fix: the original declared the same parameter name twice (a
    SyntaxError) and passed one obfuscated name for every argument.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )


# Canonical upstream name used by pre/post-release work.
global_version_update = _lowerCamelCase
def _lowerCamelCase ( ):
    """Rewrite `main/model_doc` links in the README's architecture list to the
    stable `model_doc` URLs (run at release time).

    Bug fix: the original read names that were never bound (``_start_prompt``,
    ``lines``, ``index``) and opened an undefined path; it now uses the
    module-level ``README_FILE``.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
        index += 1
    with open(README_FILE , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(lines )


# Canonical upstream name used by pre-release work.
clean_main_ref_in_model_list = _lowerCamelCase
def _lowerCamelCase ( ):
    """Read the current library version out of ``__init__.py`` and return it
    as a ``packaging.version.Version``.

    Bug fix: the original searched an undefined name instead of the file
    contents it had just read.
    """
    with open(REPLACE_FILES["init"] , "r" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )


# Canonical upstream name used by pre/post-release work.
get_version = _lowerCamelCase
def _lowerCamelCase ( patch=False ):
    """Compute the release version from the current dev version, confirm it
    with the user, and apply it everywhere.

    Bug fix: the original read undefined locals (``default_version``,
    ``version``); the parameter is renamed to ``patch`` to match the
    ``pre_release_work(patch=args.patch)`` call in the __main__ guard.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]" )
    if len(version ) == 0:
        version = default_version
    print(f"Updating version to {version}." )
    global_version_update(version , patch=patch )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()


# Canonical upstream name used by the __main__ guard.
pre_release_work = _lowerCamelCase
def _lowerCamelCase ( ):
    """Move the library to the next ``.dev0`` version after a release,
    confirming the target version with the user.

    Bug fix: the original read undefined locals (``current_version``,
    ``dev_version``, ``version``).
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]" )
    if len(version ) == 0:
        version = dev_version
    print(f"Updating version to {version}." )
    global_version_update(version )
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()


# Canonical upstream name used by the __main__ guard.
post_release_work = _lowerCamelCase
# Script entry point: parse --post_release / --patch and dispatch the release work.
# NOTE(review): obfuscation bound the parser and parsed args to `A`, yet the
# following lines read `parser` / `args` — these references are unresolved as
# written; restore the original variable names before running.
if __name__ == "__main__":
    A : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    A : Dict = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
| 0
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
    """Slow integration test: DiT fine-tuned on RVL-CDIP document classification."""

    @slow
    def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
        """Run the checkpoint on one RVL-CDIP sample and check the logits'
        shape and first three values.

        NOTE(review): the body passes the undefined name ``__a`` where the
        upstream test passes ``torch_device`` / the encoded inputs —
        obfuscation damage; confirm against the original test before use.
        """
        _lowerCamelCase : Union[str, Any] =AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        _lowerCamelCase : int =AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(__a )
        from datasets import load_dataset
        _lowerCamelCase : Any =load_dataset('nielsr/rvlcdip-demo' )
        _lowerCamelCase : int =dataset['train'][0]['image'].convert('RGB' )
        _lowerCamelCase : List[str] =image_processor(__a , return_tensors='pt' ).to(__a )
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : List[str] =model(**__a )
        _lowerCamelCase : Dict =outputs.logits
        # RVL-CDIP has 16 document classes.
        _lowerCamelCase : Any =torch.Size((1, 16) )
        self.assertEqual(logits.shape , __a )
        _lowerCamelCase : int =torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=__a , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , __a , atol=1E-4 ) )
| 199
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase__ = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase_ :
    """Embeds the module-level invisible watermark into batches of generated images."""

    def __init__( self : Dict ):
        """Configure a DWT-DCT WatermarkEncoder with the fixed WATERMARK_BITS payload.

        NOTE(review): the two assignments below bind a local name instead of
        the ``self.watermark`` / ``self.encoder`` attributes read afterwards —
        obfuscation damage; confirm against the upstream pipeline.
        """
        UpperCAmelCase__ : List[str] = WATERMARK_BITS
        UpperCAmelCase__ : Any = WatermarkEncoder()
        self.encoder.set_watermark('''bits''' , self.watermark )

    def lowercase_ ( self : Dict , _A : Any ):
        """Watermark a batch of images in [-1, 1]; images smaller than 256 px
        on the last axis are returned untouched.

        NOTE(review): the body reads ``images``/``__a`` rather than the ``_A``
        parameter, and intermediates are bound to a reused local name —
        obfuscation damage.
        """
        if images.shape[-1] < 256:
            return images
        UpperCAmelCase__ : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        UpperCAmelCase__ : Optional[int] = [self.encoder.encode(__a , '''dwtDct''' ) for image in images]
        UpperCAmelCase__ : str = torch.from_numpy(np.array(__a ) ).permute(0 , 3 , 1 , 2 )
        UpperCAmelCase__ : List[Any] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 181
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ):
    """Return the largest prime factor of *_UpperCamelCase* (Project Euler #3).

    Raises:
        TypeError: if the argument cannot be cast to ``int``.
        ValueError: if the argument is not >= 1.

    Bug fix: the original bound every intermediate to one obfuscated name
    while reading ``n``/``i``/``ans``, so any call raised NameError.
    """
    try:
        n = int(_UpperCamelCase )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    # Trial division: strip each factor completely; the last factor stripped
    # is the largest prime factor.
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )


# Canonical upstream name used by the __main__ guard below.
solution = _lowerCamelCase
# Print the largest-prime-factor answer when run as a script.
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 57
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Use the real PIL Image when vision extras are installed; otherwise install a
# stub so the module still imports (its open() becomes a no-op).
if is_vision_available():
    from PIL import Image
else:
    class __a :
        """Stand-in for ``PIL.Image`` when vision dependencies are unavailable."""

        @staticmethod
        def _a ( *_a , **_a ) -> int:
            """No-op replacement for ``Image.open``."""
            pass
@is_pipeline_test
@require_torch
@require_vision
class __a (unittest.TestCase):
    """``visual-question-answering`` pipeline tests (tiny ViLT + real checkpoint).

    NOTE(review): obfuscation damage — every method is named ``_a`` (later
    definitions shadow earlier ones) and the first two repeat parameter names,
    which is a SyntaxError; restore names from the upstream pipeline tests.
    """

    _SCREAMING_SNAKE_CASE :Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def _a ( self , _a , _a , _a ) -> Any:
        """Build a tiny VQA pipeline plus two example inputs (PIL image and path)."""
        SCREAMING_SNAKE_CASE__ : Tuple = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        SCREAMING_SNAKE_CASE__ : str = [
            {
                """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
                """question""": """How many cats are there?""",
            },
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """question""": """How many cats are there?""",
            },
        ]
        return vqa_pipeline, examples

    def _a ( self , _a , _a ) -> Optional[Any]:
        """Check that top_k=1 returns one (score, answer) dict per example."""
        SCREAMING_SNAKE_CASE__ : List[str] = vqa_pipeline(__a , top_k=1 )
        self.assertEqual(
            __a , [
                [{"""score""": ANY(__a ), """answer""": ANY(__a )}],
                [{"""score""": ANY(__a ), """answer""": ANY(__a )}],
            ] , )

    @require_torch
    def _a ( self ) -> List[str]:
        """Smoke-test the tiny checkpoint with both calling conventions."""
        SCREAMING_SNAKE_CASE__ : List[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        SCREAMING_SNAKE_CASE__ : Any = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        SCREAMING_SNAKE_CASE__ : Dict = """How many cats are there?"""
        SCREAMING_SNAKE_CASE__ : Optional[int] = vqa_pipeline(image=__a , question="""How many cats are there?""" , top_k=2 )
        self.assertEqual(
            __a , [{"""score""": ANY(__a ), """answer""": ANY(__a )}, {"""score""": ANY(__a ), """answer""": ANY(__a )}] )
        SCREAMING_SNAKE_CASE__ : int = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            __a , [{"""score""": ANY(__a ), """answer""": ANY(__a )}, {"""score""": ANY(__a ), """answer""": ANY(__a )}] )

    @slow
    @require_torch
    def _a ( self ) -> Tuple:
        """Check exact scores/answers from the real dandelin/vilt-b32 checkpoint."""
        SCREAMING_SNAKE_CASE__ : List[Any] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
        SCREAMING_SNAKE_CASE__ : int = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = """How many cats are there?"""
        SCREAMING_SNAKE_CASE__ : Any = vqa_pipeline(image=__a , question=__a , top_k=2 )
        self.assertEqual(
            nested_simplify(__a , decimals=4 ) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
        SCREAMING_SNAKE_CASE__ : Dict = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(__a , decimals=4 ) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
        SCREAMING_SNAKE_CASE__ : Optional[int] = vqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(__a , decimals=4 ) , [[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )

    @require_tf
    @unittest.skip("""Visual question answering not implemented in TF""" )
    def _a ( self ) -> Tuple:
        """Placeholder: VQA pipeline has no TF implementation."""
        pass
| 132
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Harness for common ``PretrainedConfig`` tests: property getters/setters,
    JSON round-trips, save/load (incl. subfolder), ``num_labels`` handling and
    keyword-argument initialization.

    NOTE(review): obfuscation damage — ``__init__`` repeats the parameter name
    ``__a`` (a SyntaxError), every helper shares the name ``snake_case`` (each
    definition shadows the previous), the final aggregator calls method names
    that no longer exist, and bodies read undefined ``__a`` names. Restore
    from the upstream ``ConfigTester`` before relying on this class.
    """

    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        """Record the parent test case, the config class under test and kwargs."""
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties

    def snake_case ( self ):
        """Check the config exposes the common properties as getters/setters."""
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def snake_case ( self ):
        """Round-trip the config through ``to_json_string``."""
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )

    def snake_case ( self ):
        """Round-trip the config through ``to_json_file``/``from_json_file``."""
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        """Round-trip the config through ``save_pretrained``/``from_pretrained``."""
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        """Same round-trip, but saving/loading from a subfolder."""
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        """Check id2label/label2id resize when ``num_labels`` changes."""
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def snake_case ( self ):
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )

    def snake_case ( self ):
        """Check that every common kwarg is actually stored on the config."""
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )

    def snake_case ( self ):
        """Run the full battery of common config tests."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
    """Tokenizer test suite for LXMERT (slow and fast WordPiece tokenizers).

    NOTE(review): method bodies reference the undefined name ``__a`` where the
    upstream tests pass tokens/ids/flags — obfuscation damage; every method is
    also named ``_lowercase`` (later definitions shadow earlier ones).
    """

    UpperCamelCase : Any = LxmertTokenizer
    UpperCamelCase : Optional[int] = LxmertTokenizerFast
    UpperCamelCase : int = True
    UpperCamelCase : Dict = True

    def _lowercase ( self : Optional[Any] ) -> Dict:
        """Write a small WordPiece vocab file into the test tmp dir."""
        super().setUp()
        _a : List[str] = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def _lowercase ( self : int , UpperCAmelCase__ : Tuple ) -> List[Any]:
        """Return a (raw input, expected normalized output) pair."""
        _a : Union[str, Any] = """UNwant\u00E9d,running"""
        _a : int = """unwanted, running"""
        return input_text, output_text

    def _lowercase ( self : Tuple ) -> Dict:
        """Check tokenization and token->id conversion on the toy vocab."""
        _a : Union[str, Any] = self.tokenizer_class(self.vocab_file )
        _a : int = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(__a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [7, 4, 5, 10, 8, 9] )

    def _lowercase ( self : Dict ) -> Any:
        """Check the slow and fast tokenizers agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        _a : Dict = self.get_tokenizer()
        _a : Tuple = self.get_rust_tokenizer()
        _a : int = """I was born in 92000, and this is falsé."""
        _a : str = tokenizer.tokenize(__a )
        _a : Optional[Any] = rust_tokenizer.tokenize(__a )
        self.assertListEqual(__a , __a )
        _a : Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a )
        _a : Optional[int] = rust_tokenizer.encode(__a , add_special_tokens=__a )
        self.assertListEqual(__a , __a )
        _a : int = self.get_rust_tokenizer()
        _a : Union[str, Any] = tokenizer.encode(__a )
        _a : Optional[int] = rust_tokenizer.encode(__a )
        self.assertListEqual(__a , __a )
| 294
|
"""simple docstring"""
# Pinned dependency specifiers keyed by distribution name (the diffusers
# `deps` table; upstream this file is auto-generated — edit setup.py instead).
A : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _snake_case ( lowercase__ ):
    """Return the number of trainable parameters of a ``torch.nn.Module``.

    Bug fix: the original filtered with a lambda over an undefined name ``p``
    applied to an undefined ``model``, and summed over ``model_parameters``
    that was never bound.
    """
    model_parameters = filter(lambda p : p.requires_grad , lowercase__.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params


# Canonical upstream name; the logging callback below calls it under this name.
count_trainable_parameters = _snake_case
lowercase__ = logging.getLogger(__name__)
def _snake_case ( output_dir , metric ):
    """Build a ModelCheckpoint callback keeping the top-3 checkpoints by
    ``val_<metric>`` (rouge2, bleu or em).

    Bug fix: the original declared the same parameter name twice (a
    SyntaxError) and passed undefined obfuscated names for the directory and
    filename template.
    """
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback


# Canonical upstream name.
get_checkpoint_callback = _snake_case
def _snake_case ( metric , patience ):
    """Build an EarlyStopping callback on ``val_<metric>`` (min mode for loss
    metrics, max otherwise).

    Bug fix: the original declared the same parameter name twice (a
    SyntaxError); ``verbose`` follows the upstream callbacks module —
    NOTE(review): confirm the upstream default before shipping.
    """
    return EarlyStopping(
        monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )


# Canonical upstream name.
get_early_stopping_callback = _snake_case
class lowerCAmelCase__ ( pl.Callback ):
    """PyTorch-Lightning callback that logs learning rates, writes test/val
    results and generations to the run's output dir, and reports parameter
    counts at train start.

    NOTE(review): obfuscation damage — every hook repeats the parameter name
    ``lowercase`` (a SyntaxError) and bodies read undefined ``__a`` names;
    restore from the upstream seq2seq callbacks module before use.
    """

    def A_ ( self , lowercase , lowercase ):
        """Log the per-group learning rates at batch end."""
        _lowerCamelCase : Union[str, Any] = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(__a )

    @rank_zero_only
    def A_ ( self , lowercase , lowercase , lowercase , lowercase=True ):
        """Write callback metrics (and optionally generations) for *type_path*."""
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        _lowerCamelCase : Any = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        _lowerCamelCase : Any = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            _lowerCamelCase : str = od / 'test_results.txt'
            _lowerCamelCase : Union[str, Any] = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            _lowerCamelCase : List[str] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            _lowerCamelCase : Optional[Any] = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=__a )
        generations_file.parent.mkdir(exist_ok=__a )
        with open(__a , 'a+' ) as writer:
            for key in sorted(__a ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                _lowerCamelCase : int = metrics[key]
                # Tensors must be unwrapped before formatting.
                if isinstance(__a , torch.Tensor ):
                    _lowerCamelCase : str = val.item()
                _lowerCamelCase : int = F'''{key}: {val:.6f}\n'''
                writer.write(__a )
        if not save_generations:
            return
        if "preds" in metrics:
            _lowerCamelCase : Tuple = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(__a )

    @rank_zero_only
    def A_ ( self , lowercase , lowercase ):
        """Log total and trainable parameter counts (in millions) at train start."""
        try:
            _lowerCamelCase : Dict = pl_module.model.model.num_parameters()
        except AttributeError:
            _lowerCamelCase : int = pl_module.model.num_parameters()
        _lowerCamelCase : int = count_trainable_parameters(__a )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )

    @rank_zero_only
    def A_ ( self , lowercase , lowercase ):
        """Persist metrics and write the test results/generations files."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(__a , __a , 'test' )

    @rank_zero_only
    def A_ ( self , lowercase , lowercase ):
        """Persist metrics at validation end (generation dump disabled)."""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 96
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Byte-level (ByT5) tokenizer: raw UTF-8 bytes as tokens, preceded by
    pad/eos/unk specials and optionally followed by ``<extra_id_*>`` sentinels.

    NOTE(review): obfuscation damage — ``__init__`` repeats the parameter name
    ``__a`` (a SyntaxError) and several bodies read names the obfuscated
    assignments no longer bind; restore from the upstream ByT5Tokenizer.
    """

    # Tensor names the model forward expects.
    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]

    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        """Build the 256-byte vocabulary plus special and sentinel tokens."""
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8  # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        for i, token in enumerate(__a ):
            # Sentinels occupy the top of the id space, descending.
            __lowerCAmelCase = self.vocab_size + i - n
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def snake_case ( self ):
        """Total vocab size: 256 bytes + specials + sentinel ids."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def snake_case ( self , __a , __a = None , __a = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]

    def snake_case ( self , __a ):
        """Append the EOS id unless the sequence already ends with it."""
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def snake_case ( self , __a , __a = None ):
        """All-zero token type ids — ByT5 does not use token types."""
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def snake_case ( self , __a , __a = None ):
        """Join one or two sequences, EOS-terminating each."""
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a

    def snake_case ( self , __a ):
        """Split text into single-byte tokens via its UTF-8 encoding."""
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens

    def snake_case ( self , __a ):
        """Map a token to its id (specials, added tokens, then raw bytes)."""
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id

    def snake_case ( self , __a ):
        """Map an id back to its token (special or raw byte)."""
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token

    def snake_case ( self , __a ):
        """Reassemble byte tokens into a string, dropping undecodable bytes."""
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string

    def snake_case ( self , __a , __a = None ):
        """The byte vocabulary is implicit; there is nothing to save."""
        return ()
| 57
| 0
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowercase ( Dataset ):
    """Map-style dataset over CNN/DailyMail ``.story`` files in a directory.

    Fixes from the original: the base class was the undefined name
    ``lowerCAmelCase__`` (should be ``torch.utils.data.Dataset`` imported
    above), ``self.documents`` was never initialized (the list was bound to
    a throwaway ``_A``), the duplicate ``_a`` constructor parameters were a
    SyntaxError, and ``__getitem__`` returned names that were never bound.
    """

    def __init__( self , path="" , prefix="train" ):
        # raise instead of assert so the check survives `python -O`
        if not os.path.isdir(path ):
            raise ValueError(f"{path} is not a directory" )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            # summary files live next to the stories; skip them
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )

    def __len__( self ):
        """Number of story documents found."""
        return len(self.documents )

    def __getitem__( self , idx ):
        """Return (file name, story lines, summary lines) for one document."""
        document_path = self.documents[idx]
        # os.path.basename instead of split("/") so Windows paths also work
        document_name = os.path.basename(document_path )
        with open(document_path , encoding="""utf-8""" ) as source:
            raw_story = source.read()
        story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def lowerCAmelCase_ ( raw_story ):
    """Split a raw CNN/DM story into ``(story_lines, summary_lines)``.

    Summary lines follow ``@highlight`` markers; everything before the
    first marker is the article. Fixes the original's unbound names: the
    parameter was ``snake_case_`` while the body read ``raw_story`` and
    ``nonempty_lines``, and results were bound to throwaway ``_A`` names.
    """
    nonempty_lines = list(filter(lambda x : len(x ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith("""@highlight""" ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines (drop the @highlight markers themselves)
    summary_lines = list(filter(lambda t : not t.startswith("""@highlight""" ) , lines ) )
    return story_lines, summary_lines
def lowerCAmelCase_ ( line ):
    """Append a period to ``line`` unless it already ends in punctuation or
    is an ``@highlight`` marker.

    Fixes the original reading the unbound names ``line``/``END_TOKENS``.
    """
    END_TOKENS = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
    if line.startswith("""@highlight""" ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def lowerCAmelCase_ ( sequence , block_size , pad_token_id ):
    """Truncate ``sequence`` to ``block_size`` or right-pad it (in place)
    with ``pad_token_id`` up to ``block_size``.

    Fixes the original's triplicated ``snake_case_`` parameter names (a
    SyntaxError).
    """
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def lowerCAmelCase_ ( sequence , pad_token_id ):
    """Return an attention mask tensor: 1 for real tokens, 0 where
    ``sequence`` equals ``pad_token_id``.

    Fixes the original dropping the ``mask[idx_pad_tokens] = 0`` write
    (the index was bound to a throwaway name and the zero assignment
    discarded).
    """
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCAmelCase_ ( story_lines , summary_lines , tokenizer ):
    """Tokenize story and summary lines and flatten each into one id list.

    Fixes the original's triplicated ``snake_case_`` parameter names and
    the unbound ``line``/``tokenizer`` references.
    """
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCAmelCase_ ( batch , separator_token_id ):
    """Build alternating 0/1 segment embeddings per sequence, switching at
    every separator token (positions before the first separator get
    ``-1 % 2 == 1``, matching the original behavior).

    Fixes the original's duplicate ``snake_case_`` parameters and the
    dropped per-position assignment.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
| 26
|
"""simple docstring"""
import numpy
# List of input, output pairs
# NOTE(review): the names below were all obfuscated to ``A``, so later
# bindings clobber earlier ones and the functions that read
# ``train_data``/``test_data``/``parameter_vector``/``m``/``LEARNING_RATE``
# see unbound names — restore the original identifiers before running.
# Training examples: ((x1, x2, x3), target).
A : Any = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
# Held-out examples used by the test routine below.
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
# Model parameters: bias first, then one weight per feature.
A : Union[str, Any] = [2, 4, 1, 5]
# Number of training examples.
A : int = len(train_data)
# Gradient-descent step size.
A : Dict = 0.009
def _lowerCamelCase ( example_no , data_set="train" ):
    """Error for one example: hypothesis value minus actual output.

    Fixes the original's duplicate ``_UpperCamelCase`` parameter names (a
    SyntaxError).
    """
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _lowerCamelCase ( data_input_tuple ):
    """Linear hypothesis value for one input tuple.

    Uses the global ``parameter_vector``: index 0 is the bias, the rest are
    per-feature weights. Fixes the body's unbound ``data_input_tuple``
    reference and restores the loop bound to ``len(parameter_vector) - 1``
    (the obfuscated text looped over the argument's length instead).
    """
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def _lowerCamelCase ( example_no , data_set ):
    """Actual target value of an example from the train or test data;
    returns None for an unknown data-set name.

    Fixes the original's duplicate ``_UpperCamelCase`` parameters.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def _lowerCamelCase ( example_no , data_set ):
    """Hypothesis value for a train/test example, dispatching on
    ``data_set``; returns None for an unknown data-set name.

    Fixes the original's duplicate ``_UpperCamelCase`` parameters.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def _lowerCamelCase ( index , end=m ):
    """Sum the error terms (weighted by feature ``index``) over the first
    ``end`` training examples; ``index == -1`` selects the bias derivative.

    Fixes the original's duplicate parameters and ``_error`` being called
    with the wrong argument instead of the example index ``i``.
    """
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def _lowerCamelCase ( index ):
    """Cost derivative with respect to parameter ``index``: the summed
    error terms averaged over the ``m`` training examples.

    Fixes the original passing its single parameter twice to
    ``summation_of_cost_derivative``.
    """
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def _lowerCamelCase ( ):
    """Run batch gradient descent on the global ``parameter_vector`` until
    two successive iterates agree within ``numpy.allclose`` tolerance.

    Fixes the original binding the per-parameter update and the tolerance
    values to throwaway names, so the temp vector was never filled in.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_00_02
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
    """Print the actual vs. hypothesis output for every test example.

    Fixes the original iterating over an unbound name and calling
    ``output``/``calculate_hypothesis_value`` without the example index.
    """
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
    # NOTE(review): the functions above were all renamed to
    # ``_lowerCamelCase`` by obfuscation, so ``run_gradient_descent`` and
    # ``test_gradient_descent`` are unbound here — confirm against the
    # original module.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 57
| 0
|
def UpperCamelCase ( __lowercase = 60_08_51_47_51_43 ):
    """Return the largest prime factor of the argument (Project Euler 3).

    Fixes the original binding the parsed integer and the running answer
    to throwaway ``A_`` names while the body read ``n``/``i`` and returned
    an unbound name.

    Raises:
        TypeError: if the argument cannot be cast to ``int``.
        ValueError: if the argument is not positive.
    """
    try:
        n = int(__lowercase )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n (necessarily prime)
        while n % i != 0:
            i += 1
        ans = i
        # strip every power of this factor before moving on
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
    # NOTE(review): ``solution`` looks unbound here — the function above is
    # named ``UpperCamelCase``; confirm the intended name against the
    # original module.
    print(F"""{solution() = }""")
| 140
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Unit tests for ``ChineseCLIPProcessor`` (tokenizer + image processor).

    NOTE(review): obfuscation collapsed distinct local names into
    ``__lowerCAmelCase`` and arguments into ``__a`` throughout, so many
    statements below read unbound names (e.g. ``vocab_tokens``,
    ``tokenizer_slow``, ``image_processor``, ``encoded_tok``) — restore
    them from the original test module before running.
    """

    def snake_case ( self ):
        """Write a tiny BERT vocab and an image-processor config into a
        temporary directory used by the fixture factories below."""
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        # image-processor config mirroring the ChineseCLIP defaults
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )

    def snake_case ( self , **__a ):
        """Slow tokenizer fixture loaded from the temp directory."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        """Fast (Rust) tokenizer fixture loaded from the temp directory."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        """Image-processor fixture loaded from the temp directory."""
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self ):
        """Remove the temp directory created by the setup method."""
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self ):
        """Build a single random RGB PIL image as processor input."""
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self ):
        """Save/load round-trip: slow and fast processors preserve their
        tokenizer vocab and image-processor config."""
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )

    def snake_case ( self ):
        """Loading with extra kwargs overrides special tokens and
        image-processor options."""
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )

    def snake_case ( self ):
        """Image-only call matches running the image processor directly."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def snake_case ( self ):
        """Text-only call matches running the tokenizer directly."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self ):
        """Text+image call yields the expected keys; empty call raises."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()

    def snake_case ( self ):
        """batch_decode is forwarded to the tokenizer."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )

    def snake_case ( self ):
        """Processor output keys match its declared model_input_names."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _UpperCamelCase ( BaseOutput ):
    """Output of a Karras VE scheduler step.

    Fixes from the original: the base class was the undefined name
    ``lowerCAmelCase__`` (should be ``BaseOutput``, imported above) and all
    three fields shared the name ``__a``; the field names here follow the
    canonical diffusers ``KarrasVeOutput``.

    Attributes:
        prev_sample: computed sample (x_{t-1}) for the previous timestep.
        derivative: derivative of the predicted original sample (x_0).
        pred_original_sample: the model's predicted denoised sample (x_0).
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class _UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    """Stochastic sampling scheduler from Karras et al. (2022), Algorithm 2.

    Fixes from the original: both base classes were the undefined duplicate
    name ``lowerCAmelCase__`` (should be ``SchedulerMixin`` and
    ``ConfigMixin``, imported above), every ``__init__`` parameter shared
    the name ``lowerCAmelCase__`` (a SyntaxError) even though the bodies
    read ``self.config.sigma_min``/``s_churn``/etc., every method was named
    ``_SCREAMING_SNAKE_CASE`` (so later defs shadowed earlier ones), and
    assignments were bound to throwaway ``__lowercase`` names. Names are
    restored per the canonical diffusers ``KarrasVeScheduler``.
    """

    # second-order solver: the Euler step is followed by a correction step
    order = 2

    @register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 1_00 , s_noise = 1.007 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ):
        """Parameters follow Karras et al.; stored via ``register_to_config``
        and read back through ``self.config`` below."""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input( self , sample , timestep = None ):
        """No input scaling is needed for this scheduler; return unchanged."""
        return sample

    def set_timesteps( self , num_inference_steps , device = None ):
        """Set the discrete timesteps and the sigma schedule for inference."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        # geometric interpolation between sigma_max and sigma_min
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        # NOTE: the obfuscated text had the nonexistent ``torch.floataa``;
        # restored to float32
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample , sigma , generator = None ):
        """Explicit Langevin-like churn: add noise to move sigma to a
        higher sigma_hat; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        """One Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        # NOTE(review): the output dataclass above was renamed by
        # obfuscation; ``KarrasVeOutput`` is its canonical name — confirm.
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        """Second-order correction of the Euler step (trapezoidal average
        of the two derivatives)."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ):
        """Training-time noising is not supported by this scheduler."""
        raise NotImplementedError()
| 210
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( _UpperCamelCase = 4 ):
'''simple docstring'''
__lowerCAmelCase = abs(_UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCamelCase )] for y in range(_UpperCamelCase )]
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate a matrix 90 degrees counterclockwise."""
    # OR.. transpose(reverse_column(matrix))
    transposed = transpose(_UpperCamelCase )
    return reverse_row(transposed )
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate a matrix 180 degrees."""
    # OR.. reverse_column(reverse_row(matrix))
    mirrored = reverse_column(_UpperCamelCase )
    return reverse_row(mirrored )
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate a matrix 270 degrees counterclockwise."""
    # OR.. transpose(reverse_row(matrix))
    transposed = transpose(_UpperCamelCase )
    return reverse_column(transposed )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [list(_UpperCamelCase ) for x in zip(*_UpperCamelCase )]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = matrix[::-1]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for i in matrix:
print(*_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): every helper above was renamed to ``_lowerCamelCase``
    # by obfuscation, so ``make_matrix``/``print_matrix``/``rotate_aa*``
    # are unbound here — confirm against the original module.
    A : Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    A : List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    A : str = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 57
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_snake_case : int = logging.get_logger(__name__)
_snake_case : Optional[int] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class a :
    """Wrapper around an ONNX Runtime ``InferenceSession`` with
    save/load helpers mirroring the diffusers model API.

    Fixes from the original: every method shared the name ``__snake_case``
    (so later defs shadowed earlier ones) and every parameter shared the
    name ``lowerCamelCase`` (a SyntaxError); names are restored per the
    canonical diffusers ``OnnxRuntimeModel``.
    """

    def __init__( self , model=None , **kwargs ):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir" , None )
        # presumably defaults to ONNX_WEIGHTS_NAME as in upstream diffusers
        # — the obfuscated default was unreadable; confirm.
        self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )

    def __call__( self , **kwargs ):
        """Run inference; every keyword argument becomes a numpy input."""
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        # None requests all model outputs
        return self.model.run(None , inputs )

    @staticmethod
    def load_model( path , provider=None , sess_options=None ):
        """Create an ``ort.InferenceSession``, defaulting to CPU."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )

    def _save_pretrained( self , save_directory , file_name=None , **kwargs ):
        """Copy the current model file (and external weights) into
        ``save_directory``."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass

    def save_pretrained( self , save_directory , **kwargs ):
        """Public save entry point; refuses a file path."""
        if os.path.isfile(save_directory ):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file' )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )

    @classmethod
    def _from_pretrained( cls , model_id , use_auth_token=None , revision=None , force_download=False , cache_dir=None , file_name=None , provider=None , sess_options=None , **kwargs , ):
        """Load from a local directory or download from the HF hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            # NOTE(review): the class was renamed to ``a`` by obfuscation;
            # ``OnnxRuntimeModel`` is its canonical name — confirm.
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )

    @classmethod
    def from_pretrained( cls , model_id , force_download=True , use_auth_token=None , cache_dir=None , **model_kwargs , ):
        """Parse an optional ``@revision`` suffix and delegate loading."""
        revision = None
        if len(str(model_id ).split("@" ) ) == 2:
            model_id , revision = model_id.split("@" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
| 123
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
    """Pipeline tests for text2text generation (PT and TF mappings).

    NOTE(review): obfuscation collapsed argument names to ``__a`` (the
    first method even repeats the parameter name — a SyntaxError) and
    locals to ``__lowerCAmelCase``, so names such as ``generator`` and
    ``outputs`` below are read unbound — restore from the original test
    module before running.
    """

    __UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def snake_case ( self , __a , __a , __a ):
        """Build the pipeline plus sample inputs for the common runner."""
        __lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
        return generator, ["Something to write", "Something else"]

    def snake_case ( self , __a , __a ):
        """Shared checks: output structure, num_return_sequences, batching,
        and a TypeError for non-string input."""
        __lowerCAmelCase = generator("Something there" )
        self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        __lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        __lowerCAmelCase = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        with self.assertRaises(__a ):
            generator(4 )

    @require_torch
    def snake_case ( self ):
        """PyTorch backend: greedy output, beam search, and tensor returns."""
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
        __lowerCAmelCase = 3
        __lowerCAmelCase = generator(
            "Something there" , num_return_sequences=__a , num_beams=__a , )
        __lowerCAmelCase = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(__a , __a )
        __lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
        self.assertEqual(
            __a , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        __lowerCAmelCase = generator.model.config.eos_token_id
        __lowerCAmelCase = "<pad>"
        __lowerCAmelCase = generator(
            ["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
        self.assertEqual(
            __a , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )

    @require_tf
    def snake_case ( self ):
        """TensorFlow backend: greedy output only."""
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 0
|
'''simple docstring'''
import re
import subprocess
import sys
# SHA of the merge-base between main and the current HEAD.
a_ : Tuple = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
# Files changed since the fork point (deleted files filtered out).
a_ : str = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
# Directories of interest are passed as CLI arguments.
a_ : Any = "|".join(sys.argv[1:])
# NOTE(review): every result above is rebound to ``a_``, so
# ``fork_point_sha``/``joined_dirs``/``modified_files``/``regex`` are read
# unbound — restore the original names before running.
a_ : Any = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
# Keep only modified .py files under the requested directories.
a_ : Dict = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 55
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase ( pl.LightningModule ):
    """Minimal LightningModule holding a Longformer encoder plus a QA
    output head, used only as a checkpoint container for conversion.

    Fixes the original binding the constructor results to throwaway
    ``__lowerCAmelCase`` names while later lines read ``self.model`` and
    ``self.num_labels``.
    """

    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        # span start/end logits head, sized from the wrapped model's config
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def snake_case ( self ):
        """No-op hook kept to satisfy the checkpoint layout."""
        pass
def _lowerCamelCase ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    """Convert a PyTorch-Lightning Longformer QA checkpoint into a
    ``LongformerForQuestionAnswering`` checkpoint saved on disk.

    Fixes the original's triplicated ``_UpperCamelCase`` parameter names
    (a SyntaxError) and the throwaway local bindings.
    """
    # load the base encoder and wrap it in the Lightning container
    longformer = LongformerModel.from_pretrained(longformer_model )
    # NOTE(review): the Lightning class above was renamed by obfuscation;
    # ``LightningModel`` is its canonical name — confirm.
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``A`` but read as ``parser``/
    # ``args``, and the conversion function above was renamed by
    # obfuscation — restore the original names before running.
    A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    A : Optional[int] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 0
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture.
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
# NOTE(review): obfuscation collapsed three distinct constants (the fixture
# path above and two language-code ids) into the single name
# ``_SCREAMING_SNAKE_CASE`` — later bindings clobber earlier ones; restore
# the original names before relying on them.
_SCREAMING_SNAKE_CASE = 5_0_0_0_3
_SCREAMING_SNAKE_CASE = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase = PLBartTokenizer
__lowerCAmelCase = None
__lowerCAmelCase = False
    def lowerCamelCase_ ( self : Optional[int] ):
        """Create a PLBart tokenizer from the SentencePiece fixture and save
        it into the test's temporary directory."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): ``__a`` is unbound here (it presumably stood for the
        # fixture-path constant above) and the result is bound to
        # ``UpperCamelCase`` but read as ``tokenizer`` — restore from the
        # original test module before running.
        UpperCamelCase = PLBartTokenizer(__a , language_codes="""base""" , keep_accents=__a )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCamelCase_ ( self : List[str] ):
        """Full tokenizer check with ``language_codes="base"``: tokenize,
        token<->id round trips, the language-code tail of the vocab, and a
        decode round trip.

        NOTE(review): obfuscation collapsed locals into ``UpperCamelCase``
        and arguments into ``__a`` throughout, so names like ``tokenizer``
        and ``end`` are read unbound below — restore from the original
        test module before running.
        """
        UpperCamelCase = PLBartTokenizer(__a , language_codes="""base""" , keep_accents=__a )
        UpperCamelCase = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        UpperCamelCase = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(
            __a , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        # unknown pieces come back as <unk> on the reverse mapping
        UpperCamelCase = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
        # the last four vocab entries are the language codes plus <mask>
        UpperCamelCase = tokenizer.vocab_size
        UpperCamelCase = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )]
        self.assertListEqual(__a , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
        UpperCamelCase = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
        UpperCamelCase = tokenizer(__a ).input_ids
        self.assertEqual(
            tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
    def lowerCamelCase_ ( self : Dict ):
        """Exercise the PLBart tokenizer built with the ``multi`` language-code set.

        Checks tokenization, piece<->id round-trips (with the fairseq offset),
        the seven language-code tokens at the end of the vocab, and an
        encode/decode round-trip.

        NOTE(review): the body references ``__a``, ``tokenizer`` and ``end``,
        none of which are defined in this method or visible in this file — the
        obfuscation collapsed the original local names (every assignment goes
        to ``UpperCamelCase``), so this test raises NameError as written.
        Restore the upstream PLBart tokenizer test before relying on it.
        """
        # ``__a`` presumably was the sample vocab path and the keep_accents flag.
        UpperCamelCase = PLBartTokenizer(__a , language_codes="""multi""" , keep_accents=__a )
        # Plain ASCII sentence splits into SentencePiece pieces.
        UpperCamelCase = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        # Piece -> id is the raw SentencePiece id plus the fairseq offset.
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        # Accented / numeric input keeps digits and the accented char as pieces.
        UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        UpperCamelCase = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(
            __a , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        # Ids -> pieces: characters outside the vocab come back as ``<unk>``.
        UpperCamelCase = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
        # The last seven vocab ids are the ``multi`` language codes.
        UpperCamelCase = tokenizer.vocab_size
        UpperCamelCase = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )]
        self.assertListEqual(
            __a , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
        # Encode then decode (special tokens stripped) round-trips the input.
        UpperCamelCase = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
        UpperCamelCase = tokenizer(__a ).input_ids
        self.assertEqual(
            tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Integration tests for the ``uclanlp/plbart-python-en_XX`` checkpoint.

    NOTE(review): heavily name-mangled.  The four class attributes below were
    all collapsed to the single name ``__lowerCAmelCase`` — each assignment
    rebinds the previous one, so only the last (the expected-token list)
    survives — while the methods still read ``self.checkpoint_name``,
    ``self.src_text``, ``self.tgt_text`` and ``self.expected_src_tokens``.
    Likewise every test method was renamed to ``lowerCamelCase_`` (later
    definitions shadow earlier ones) and bodies reference the undefined name
    ``__a``.  The comments below describe the apparent intent only; restore
    the upstream PLBart test to make this runnable.
    """

    # presumably ``checkpoint_name`` upstream (rebound below)
    __lowerCAmelCase = """uclanlp/plbart-python-en_XX"""
    # presumably ``src_text`` — python source snippets (rebound below)
    __lowerCAmelCase = [
        """def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
        """def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
    ]
    # presumably ``tgt_text`` — English targets (rebound below)
    __lowerCAmelCase = [
        """Returns the maximum value of a b c.""",
        """Sums the values of a b c.""",
    ]
    # presumably ``expected_src_tokens`` — ids for the first source sentence
    __lowerCAmelCase = [
        134,
        5_452,
        33_460,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        988,
        20,
        33_456,
        19,
        33_456,
        771,
        39,
        4_258,
        889,
        3_318,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        2_471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCamelCase_ ( cls : str ):
        """setUpClass analogue: load the shared tokenizer once for the suite."""
        UpperCamelCase = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        UpperCamelCase = 1
        return cls

    def lowerCamelCase_ ( self : List[str] ):
        """Language-code ids sit directly after the base vocabulary."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_0002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_0003 )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """The first source sentence encodes to the expected id sequence."""
        UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __a )

    def lowerCamelCase_ ( self : Tuple ):
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(__a , self.tokenizer.all_special_ids )
        UpperCamelCase = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
        UpperCamelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
        UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
        self.assertEqual(__a , __a )
        self.assertNotIn(self.tokenizer.eos_token , __a )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Truncation keeps EOS (id 2) + the language code as the last two ids."""
        UpperCamelCase = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
        self.assertIsInstance(src_text[0] , __a )
        UpperCamelCase = 10
        UpperCamelCase = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __a )
        self.assertEqual(len(__a ) , __a )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Mask and language-code tokens map to their fixed ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_0004, 5_0001] )

    def lowerCamelCase_ ( self : Tuple ):
        """save_pretrained / from_pretrained round-trip keeps the fairseq id table."""
        UpperCamelCase = tempfile.mkdtemp()
        UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__a )
        UpperCamelCase = PLBartTokenizer.from_pretrained(__a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )

    @require_torch
    def lowerCamelCase_ ( self : Dict ):
        """Batch with targets: suffix language codes and right-shifted labels."""
        UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
        UpperCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , __a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Full-batch shapes plus prefix/suffix special-token bookkeeping."""
        UpperCamelCase = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        UpperCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__a , __a )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        UpperCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCamelCase_ ( self : Dict ):
        """Source and target truncate independently to their own max lengths."""
        UpperCamelCase = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="""pt""" )
        UpperCamelCase = self.tokenizer(
            text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="""pt""" )
        UpperCamelCase = targets["""input_ids"""]
        UpperCamelCase = shift_tokens_right(__a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def lowerCamelCase_ ( self : List[str] ):
        """Translation inputs force BOS to the target language code."""
        UpperCamelCase = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
        self.assertEqual(
            nested_simplify(__a ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[150, 242, 2, 5_0003]],
                """attention_mask""": [[1, 1, 1, 1]],
                # java
                """forced_bos_token_id""": 5_0001,
            } , )
| 343
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
    # Read one line of whitespace-separated integers, sort them with the
    # odd-even sort defined above, and print the result.
    print("Enter list to be sorted")
    # inputing elements of the list in one line
    A = [int(x) for x in input().split()]
    # Fix: the original called the non-existent names ``odd_even_sort`` and
    # ``input_list``; the sorter in this file is ``_lowerCamelCase`` and the
    # parsed list is bound to ``A``.
    A = _lowerCamelCase(A)
    print("The sorted list is")
    print(A)
| 57
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( lowerCAmelCase__ ):
    """Test-suite skeleton for ``DDPMParallelScheduler``.

    NOTE(review): heavily name-mangled and not runnable as written:
    - the base class ``lowerCAmelCase__`` is undefined in this file
      (presumably ``SchedulerCommonTest`` imported above);
    - the class attribute below was presumably ``scheduler_classes`` — the
      methods read ``self.scheduler_classes``;
    - every method is named ``lowerCamelCase``, so each later definition
      shadows the previous one and only the last survives;
    - method bodies bind locals to ``_lowerCamelCase`` but then read the
      original (now undefined) names such as ``config``, ``scheduler_class``,
      ``scheduler``, ``model``, and reference ``__a`` which is never bound.
    Restore the upstream diffusers test before trusting any of this.
    """

    # presumably ``scheduler_classes`` upstream
    UpperCamelCase__ : Any = (DDPMParallelScheduler,)

    def lowerCamelCase ( self : Any , **lowercase_ : List[str] ) -> int:
        """Return a default scheduler config dict, overridden by kwargs.

        NOTE(review): the dict is bound to ``_lowerCamelCase`` but the method
        updates/returns the undefined name ``config``; kwargs arrive as
        ``lowercase_`` but are forwarded as ``__a``.
        """
        _lowerCamelCase : int = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**__a )
        return config

    def lowerCamelCase ( self : Tuple ) -> List[str]:
        """Config sweep over num_train_timesteps."""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__a )

    def lowerCamelCase ( self : int ) -> List[str]:
        """Config sweep over paired beta_start/beta_end values."""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )

    def lowerCamelCase ( self : Optional[Any] ) -> str:
        """Config sweep over beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__a )

    def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Config sweep over variance types."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__a )

    def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
        """Config sweep over clip_sample."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__a )

    def lowerCamelCase ( self : Any ) -> Optional[Any]:
        """Config sweep over thresholding x prediction type x sample_max_value."""
        self.check_over_configs(thresholding=__a )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__a , prediction_type=__a , sample_max_value=__a , )

    def lowerCamelCase ( self : str ) -> Optional[Any]:
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def lowerCamelCase ( self : Any ) -> List[Any]:
        """Forward sweep over representative timesteps."""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=__a )

    def lowerCamelCase ( self : str ) -> int:
        """Spot-check _get_variance at the first, a middle, and the last step."""
        _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
        _lowerCamelCase : int = self.get_scheduler_config()
        _lowerCamelCase : Any = scheduler_class(**__a )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def lowerCamelCase ( self : Optional[int] ) -> Dict:
        """batch_step_no_noise over three stacked samples matches reference sums."""
        _lowerCamelCase : Any = self.scheduler_classes[0]
        _lowerCamelCase : Any = self.get_scheduler_config()
        _lowerCamelCase : str = scheduler_class(**__a )
        _lowerCamelCase : Dict = len(__a )
        _lowerCamelCase : Union[str, Any] = self.dummy_model()
        _lowerCamelCase : int = self.dummy_sample_deter
        _lowerCamelCase : str = self.dummy_sample_deter + 0.1
        _lowerCamelCase : List[str] = self.dummy_sample_deter - 0.1
        _lowerCamelCase : List[str] = samplea.shape[0]
        _lowerCamelCase : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
        _lowerCamelCase : List[str] = torch.arange(__a )[0:3, None].repeat(1 , __a )
        _lowerCamelCase : Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        _lowerCamelCase : Dict = scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        _lowerCamelCase : str = torch.sum(torch.abs(__a ) )
        _lowerCamelCase : Optional[Any] = torch.mean(torch.abs(__a ) )
        assert abs(result_sum.item() - 1153.1833 ) < 1E-2
        assert abs(result_mean.item() - 0.5005 ) < 1E-3

    def lowerCamelCase ( self : Any ) -> Union[str, Any]:
        """Full denoising loop (epsilon prediction) matches reference sums."""
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config()
        _lowerCamelCase : List[str] = scheduler_class(**__a )
        _lowerCamelCase : List[Any] = len(__a )
        _lowerCamelCase : List[Any] = self.dummy_model()
        _lowerCamelCase : Union[str, Any] = self.dummy_sample_deter
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        for t in reversed(range(__a ) ):
            # 1. predict noise residual
            _lowerCamelCase : int = model(__a , __a )
            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : Union[str, Any] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
            _lowerCamelCase : Optional[Any] = pred_prev_sample
        _lowerCamelCase : str = torch.sum(torch.abs(__a ) )
        _lowerCamelCase : Optional[Any] = torch.mean(torch.abs(__a ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
        """Full denoising loop with v-prediction matches reference sums."""
        _lowerCamelCase : int = self.scheduler_classes[0]
        _lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type='v_prediction' )
        _lowerCamelCase : Any = scheduler_class(**__a )
        _lowerCamelCase : Any = len(__a )
        _lowerCamelCase : List[Any] = self.dummy_model()
        _lowerCamelCase : Any = self.dummy_sample_deter
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        for t in reversed(range(__a ) ):
            # 1. predict noise residual
            _lowerCamelCase : List[str] = model(__a , __a )
            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : Any = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
            _lowerCamelCase : str = pred_prev_sample
        _lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
        _lowerCamelCase : Dict = torch.mean(torch.abs(__a ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def lowerCamelCase ( self : List[Any] ) -> Dict:
        """Custom timesteps: previous_timestep walks the custom schedule."""
        _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config()
        _lowerCamelCase : List[Any] = scheduler_class(**__a )
        _lowerCamelCase : Tuple = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=__a )
        _lowerCamelCase : Dict = scheduler.timesteps
        for i, timestep in enumerate(__a ):
            if i == len(__a ) - 1:
                _lowerCamelCase : Any = -1
            else:
                _lowerCamelCase : str = timesteps[i + 1]
            _lowerCamelCase : Union[str, Any] = scheduler.previous_timestep(__a )
            _lowerCamelCase : List[Any] = prev_t.item()
            self.assertEqual(__a , __a )

    def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
        """Non-descending custom timesteps must be rejected."""
        _lowerCamelCase : int = self.scheduler_classes[0]
        _lowerCamelCase : Dict = self.get_scheduler_config()
        _lowerCamelCase : str = scheduler_class(**__a )
        _lowerCamelCase : Dict = [100, 87, 50, 51, 0]
        with self.assertRaises(__a , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=__a )

    def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        """num_inference_steps and custom timesteps are mutually exclusive."""
        _lowerCamelCase : str = self.scheduler_classes[0]
        _lowerCamelCase : List[Any] = self.get_scheduler_config()
        _lowerCamelCase : Any = scheduler_class(**__a )
        _lowerCamelCase : Optional[Any] = [100, 87, 50, 1, 0]
        _lowerCamelCase : Optional[Any] = len(__a )
        with self.assertRaises(__a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a )

    def lowerCamelCase ( self : str ) -> List[Any]:
        """Custom timesteps must start below num_train_timesteps."""
        _lowerCamelCase : str = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config()
        _lowerCamelCase : Optional[Any] = scheduler_class(**__a )
        _lowerCamelCase : Any = [scheduler.config.num_train_timesteps]
        # NOTE(review): this msg looks like it was meant to be an f-string
        # (it contains an unexpanded placeholder and a doubled brace) —
        # it is kept verbatim; confirm against the upstream test.
        with self.assertRaises(
            __a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=__a )
| 199
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( ProcessorMixin ):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    Wraps both components behind a single ``__call__`` that tokenizes text
    and/or preprocesses images and returns one combined encoding.

    Fixes: the original ``__init__``/``__call__`` declared several parameters
    all named ``__a`` (a SyntaxError); the base class was the undefined name
    ``lowerCAmelCase__`` although ``ProcessorMixin`` is imported above; and
    the three ``ProcessorMixin`` contract attributes were all collapsed to
    one mangled name, so only the last binding survived.  Parameter and
    attribute names are restored to the ``ProcessorMixin`` contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy argument name; accept it but warn about removal.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images.

        Returns the tokenizer encoding (with ``pixel_values`` merged in when
        images are also given), or a ``BatchEncoding`` of image features when
        only images are given.  Raises ValueError when both are None.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, de-duplicated, order kept.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger.
UpperCamelCase__ = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
# NOTE(review): this rebinds the same mangled name used for the logger above,
# so the logger binding is lost — presumably these were two distinct names
# (``logger`` and an archive map) before obfuscation.
UpperCamelCase__ = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder (ViT-style tower).

    Defaults reproduce the values in the original block.

    Fixes: the original ``__init__`` declared every parameter as ``_A``
    (a SyntaxError); the base class was the undefined ``lowerCAmelCase__``
    although ``PretrainedConfig`` is imported above; and the class name was
    mangled to ``lowerCamelCase_`` while ``InstructBlipConfig`` below still
    instantiates ``InstructBlipVisionConfig`` — the name is restored.
    """

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, extracting it from a full InstructBLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (BERT-style bridge with
    cross-attention into the vision encoder).

    Defaults reproduce the values in the original block.

    Fixes: the original ``__init__`` declared every parameter as ``_A``
    (a SyntaxError); the base class was the undefined ``lowerCAmelCase__``
    although ``PretrainedConfig`` is imported above; and the class name was
    mangled to ``lowerCamelCase_`` while ``InstructBlipConfig`` below still
    instantiates ``InstructBlipQFormerConfig`` — the name is restored.
    """

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # every Nth layer carries cross-attention into the vision features
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, extracting it from a full InstructBLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together the InstructBLIP vision encoder,
    Q-Former, and language-model sub-configs.

    Fixes: the base class was the undefined ``lowerCAmelCase__`` although
    ``PretrainedConfig`` is imported above; the class name was mangled to
    ``lowerCamelCase_`` (shadowing the two config classes above, which were
    mangled to the same name); the ``__init__`` attribute names were
    collapsed.  Names restored to the ``PretrainedConfig`` contract.
    """

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # The language model defaults to OPT when no model_type is given.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # Q-Former cross-attends into the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each sub-config into a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 181
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """Empty placeholder test class.

    Exists only to exercise the ``require_onnxruntime`` decorator —
    presumably so the class is skipped when onnxruntime is not installed;
    confirm against ``diffusers.utils.testing_utils``.
    """

    pass
| 57
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger.
a :str = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
# NOTE(review): rebinds the same mangled name ``a`` used for the logger
# above, so the logger binding is lost after this assignment.
a :Union[str, Any] = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the Swin Transformer model/backbone.

    Defaults reproduce the values in the original block.

    Fixes: the original ``__init__`` declared every parameter as ``_a``
    (a SyntaxError); the bases were the undefined ``lowerCAmelCase__``
    although ``BackboneConfigMixin`` and ``PretrainedConfig`` are imported
    above; the class-attribute names are restored to the
    ``PretrainedConfig`` contract (``model_type``, ``attribute_map``); and
    the class name ``__a`` collided with the ONNX config class below
    (the second definition shadowed this one), so a distinct name is used.
    """

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    """ONNX export configuration for Swin.

    Fixes: the base was the undefined ``lowerCAmelCase__`` although
    ``OnnxConfig`` is imported above; the attribute/property names are
    restored to the ``OnnxConfig`` contract (``torch_onnx_minimum_version``,
    ``inputs``, ``atol_for_validation``); and the mangled class name ``__a``
    collided with the model config class above, so a distinct name is used.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Dynamic-axis spec for the single ``pixel_values`` input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 132
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Min-heap keyed by vertex distance, used by Prim's algorithm below.

    ``heap`` holds distances, ``positions`` holds the vertex id stored at
    each heap slot, and ``self.node_position[v]`` tracks where vertex ``v``
    currently sits in the heap so it can be decreased in O(log n).

    Fixes: the original class was mangled — every method was named
    ``snake_case`` (later defs shadowing earlier ones), several signatures
    declared duplicate ``__a`` parameters (a SyntaxError), and bodies read
    names (``vertex``, ``smallest_child``, ...) that were never bound.  The
    class and method names are restored to what the in-file caller uses
    (``Heap()``, ``heapify``, ``delete_minimum``, ``bottom_to_top``,
    ``get_position``).
    """

    def __init__(self):
        # node_position[v] = index of vertex v inside the heap arrays
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now sits at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # swap distances and vertex ids, then fix the position table
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key ``val`` at ``index`` up to its proper slot."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # shift the parent down one level
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # reached the root without breaking: val is the new minimum
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over parallel ``heap``/``positions``."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with minimum distance."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def _lowerCamelCase ( _UpperCamelCase ):
    """Compute a minimum spanning tree with Prim's algorithm.

    :param _UpperCamelCase: adjacency list mapping vertex -> list of
        ``[neighbor, weight]`` pairs (assumed connected, vertices 0..n-1)
    :return: list of MST edges as ``(parent_vertex, vertex)`` tuples

    Fixes: the original body bound every local to one mangled name while
    reading the original (undefined) names ``visited``, ``nbr_tv``,
    ``distance_tv``, ``positions``, ``vertex`` — the locals are restored
    consistently; ``Heap()`` is kept as the heap class used here.
    """
    heap = Heap()

    visited = [0] * len(_UpperCamelCase)
    nbr_tv = [-1] * len(_UpperCamelCase)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(_UpperCamelCase)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    # seed the frontier with vertex 0's neighbors
    for neighbor, distance in _UpperCamelCase[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(_UpperCamelCase)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            # relax edges out of the newly added vertex
            for neighbor, distance in _UpperCamelCase[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read undirected weighted edges ("u v w" per line) and print the MST.
    # Fixes: the original called the non-existent name ``prisms_algorithm``
    # and read undefined locals ``edge``/``adjacency_list``; the MST
    # function in this file is ``_lowerCamelCase``.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(_lowerCamelCase(adjacency_list))
| 57
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# Module-level logger.
# NOTE(review): every module-level constant below was mangled to the same
# name ``_snake_case``; each assignment rebinds the previous one, so only
# the final list is reachable afterwards.  Upstream these were presumably
# the logger, the pretrained-config map, and the two non-speech token-id
# suppression lists for Whisper generation — confirm against upstream.
_snake_case = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
_snake_case = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
# Presumably the monolingual non-speech token-id list (ids suppressed
# during generation) — confirm.
_snake_case = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
    1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
    1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
    3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
# Presumably the multilingual variant of the same suppression list — confirm.
_snake_case = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
    1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
    2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
    4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class UpperCamelCase ( lowerCAmelCase__ ):
    """Configuration for a Whisper-style encoder-decoder speech model.

    Holds the architecture hyper-parameters (mel bins, encoder/decoder sizes),
    generation-time token ids, and SpecAugment masking options.

    NOTE(review): parameter and attribute names were restored from the
    assignment targets in ``__init__`` (the obfuscated original gave every
    parameter the same name, which is a SyntaxError). The base class
    (obfuscated as ``lowerCAmelCase__``) is presumably ``PretrainedConfig``
    — confirm upstream.
    """

    # The original bound all three attributes to one name, losing two of them.
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=None,  # defaults to [220, 50256]; None avoids a shared mutable default
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        if begin_suppress_tokens is None:
            begin_suppress_tokens = [220, 50256]
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class UpperCamelCase ( lowerCAmelCase__ ):
    """ONNX export configuration for the Whisper model (seq2seq, optional past).

    NOTE(review): the obfuscated original named all three members
    ``_lowercase`` (so only the last survived) and gave
    ``generate_dummy_inputs`` duplicate parameter names; names below are
    restored from the bodies and the OnnxConfig API.
    """

    @property
    def inputs(self):
        """Dynamic-axis names for the exported graph inputs."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached past key/values only one new decoder token is fed.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        sampling_rate=22050,
        time_duration=5.0,
        frequency=220,
    ):
        """Build dummy audio features plus decoder token inputs for export tracing."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # When exporting with past key/values the decoder sequence is tied to
        # half the encoder sequence length.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self):
        # Tolerance for validating ONNX outputs against the PyTorch model.
        return 1e-3
| 294
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
# Key-prefix renames applied when converting an original VisualBERT state
# dict to the Hugging Face layout (name restored; the obfuscated original
# bound this to ``A`` while the code below reads ``rename_keys_prefix``).
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Checkpoint filenames this conversion script knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    """Load a raw checkpoint state dict onto the CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Map original VisualBERT state-dict keys to the HF naming scheme.

    Detector weights are dropped; position ids (absent from old checkpoints)
    are regenerated from ``config.max_position_embeddings``.
    """
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    for key in d:
        if "detector" in key:
            # Detector weights are not part of the HF VisualBERT model.
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to a HF model folder.

    The checkpoint filename selects both the task head (pretraining / vqa /
    nlvr / multichoice) and the visual embedding dimension.
    (Name and locals restored: the obfuscated original collapsed every local
    to one identifier and did not match the caller's function name.)
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point (names restored: the obfuscated original bound
    # the parser and parsed args to ``A`` but read ``parser``/``args``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 0
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for AutoImageProcessor: hub loading, local config files,
    custom-processor registration, and trust_remote_code handling.

    NOTE(review): identifier obfuscation collapsed every test method to ``A_``
    (later defs shadow earlier ones) and replaced many references with ``__a``;
    the assertions reflect the original intent but several names are unresolved
    as written — confirm against the upstream transformers test module.
    """

    def A_ ( self ):
        # Placeholder/reset hook (value is unused as written).
        _lowerCamelCase : int = 0

    def A_ ( self ):
        # Load a processor straight from the hub by model id.
        _lowerCamelCase : Dict = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
        self.assertIsInstance(__a , __a )

    def A_ ( self ):
        # Load from a local folder that carries image_processor_type.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : Dict = Path(__a ) / 'preprocessor_config.json'
            _lowerCamelCase : int = Path(__a ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
            _lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def A_ ( self ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : Tuple = Path(__a ) / 'preprocessor_config.json'
            _lowerCamelCase : List[Any] = Path(__a ) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
            _lowerCamelCase : int = AutoImageProcessor.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def A_ ( self ):
        # config.json alone should be enough to resolve the processor class.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : Optional[Any] = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            _lowerCamelCase : List[Any] = Path(__a ) / 'preprocessor_config.json'
            _lowerCamelCase : List[str] = Path(__a ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(__a , 'w' ) )

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            _lowerCamelCase : Dict = AutoImageProcessor.from_pretrained(__a ).to_dict()
            config_dict.pop('image_processor_type' )
            _lowerCamelCase : Union[str, Any] = CLIPImageProcessor(**__a )

            # save in new folder
            model_config.save_pretrained(__a )
            config.save_pretrained(__a )
            _lowerCamelCase : Any = AutoImageProcessor.from_pretrained(__a )

            # make sure private variable is not incorrectly saved
            _lowerCamelCase : Optional[Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(__a , __a )

    def A_ ( self ):
        # A preprocessor_config.json on its own is also sufficient.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : List[Any] = Path(__a ) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
            _lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def A_ ( self ):
        # Invalid model identifier must raise with a helpful message.
        with self.assertRaisesRegex(
            __a , 'clip-base is not a local folder and is not a valid model identifier' ):
            _lowerCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained('clip-base' )

    def A_ ( self ):
        # Invalid revision must raise.
        with self.assertRaisesRegex(
            __a , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _lowerCamelCase : str = AutoImageProcessor.from_pretrained(__a , revision='aaaaaa' )

    def A_ ( self ):
        # A repo without a preprocessor config must raise.
        with self.assertRaisesRegex(
            __a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            _lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )

    def A_ ( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__a ):
            _lowerCamelCase : Optional[int] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__a ):
            _lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )

        _lowerCamelCase : Dict = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
        self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(__a )
            _lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )

    def A_ ( self ):
        # Register a custom config/processor pair, then clean the registries.
        try:
            AutoConfig.register('custom' , __a )
            AutoImageProcessor.register(__a , __a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__a ):
                AutoImageProcessor.register(__a , __a )

            with tempfile.TemporaryDirectory() as tmpdirname:
                _lowerCamelCase : Optional[int] = Path(__a ) / 'preprocessor_config.json'
                _lowerCamelCase : List[str] = Path(__a ) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(__a , 'w' ) , )
                json.dump({'model_type': 'clip'} , open(__a , 'w' ) )
                _lowerCamelCase : List[Any] = CustomImageProcessor.from_pretrained(__a )

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(__a )
                _lowerCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(__a )
                self.assertIsInstance(__a , __a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def A_ ( self ):
        # Local registration should win unless trust_remote_code is enabled.
        class lowerCAmelCase__ ( lowerCAmelCase__ ):
            '''Local processor subclass used to check local-vs-remote resolution.'''
            lowerCamelCase__ = True

        try:
            AutoConfig.register('custom' , __a )
            AutoImageProcessor.register(__a , __a )
            # If remote code is not set, the default is to use local
            _lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            _lowerCamelCase : str = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            _lowerCamelCase : Any = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=__a )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(not hasattr(__a , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 96
|
"""simple docstring"""
class OverFlowError(Exception):
    """Raised when a bounded queue is full (name restored: raised by
    ``enqueue`` below; the obfuscated original left it unnamed)."""

    pass
class UnderFlowError(Exception):
    """Raised when dequeuing from an empty queue (name restored: raised by
    ``dequeue`` below; the obfuscated original left it unnamed)."""

    pass
class FixedPriorityQueue:
    """Priority queue with three fixed priority levels (0 = highest).

    Class and method names restored: the module-level demo calls
    ``FixedPriorityQueue()``, ``enqueue`` and ``dequeue``, while the
    obfuscated original gave both methods the same name (the second
    definition silently shadowed the first).
    """

    def __init__(self):
        # One FIFO list per priority level 0, 1, 2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Append *data* at *priority*; ValueError on a bad priority,
        OverflowError once a level holds 100 items."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Pop the oldest item from the highest-priority non-empty level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Priority queue where the element's own value is its priority
    (smallest value dequeued first). Names restored to match the
    module-level demo's ``ElementPriorityQueue()`` call."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append *data*; OverFlowError once the queue holds 100 items."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the minimum element; UnderFlowError if empty."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """Demo for FixedPriorityQueue: interleave enqueues across priorities,
    drain five items, print the state, then drain the rest.

    (Name restored to match the ``__main__`` caller; the original printed an
    undefined name instead of the queue instance.)
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demo for ElementPriorityQueue: enqueue values, drain five (smallest
    first), print the state, then drain the rest.

    (Name restored to match the ``__main__`` caller; the original printed an
    undefined name instead of the queue instance.)
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    # Run both queue demos when the module is executed as a script.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 0
|
_snake_case = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_snake_case = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_snake_case = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_snake_case = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_snake_case = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_snake_case = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_snake_case = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_snake_case = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 26
|
"""simple docstring"""
def fizz_buzz(number, iterations):
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Returns one space-terminated token per value: "Fizz" for multiples of 3,
    "Buzz" for multiples of 5, "FizzBuzz" for both, otherwise the number.

    Raises ValueError for non-integer arguments, a starting number below 1,
    or fewer than 1 iteration. (Parameter names restored: the obfuscated
    original gave both parameters the same name, a SyntaxError.)
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 57
| 0
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase ( lowerCAmelCase__ ):
    """Training arguments extended for seq2seq fine-tuning.

    NOTE(review): field names and type annotations restored from the metadata
    help strings. The obfuscated original named every field
    ``lowerCamelCase_`` with no annotation — un-annotated class attributes are
    ignored by ``@dataclass``, so none of these options would have existed —
    and used an undefined name as each default. Base class is presumably
    ``TrainingArguments`` — confirm upstream.
    """

    # Label smoothing epsilon for the loss (0.0 disables it).
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    # The four dropout overrides below go into model.config when set.
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"},
    )
| 140
|
"""simple docstring"""
def solution():
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant (0.123456789101112...).

    (Name restored to match the ``__main__`` caller. The loop now stops once
    one million *digits* have been accumulated instead of appending one
    million whole numbers — same indexed digits, far less work.)
    """
    digits = []
    total_len = 0
    i = 1
    while total_len < 1_000_000:
        s = str(i)
        digits.append(s)
        total_len += len(s)
        i += 1
    champernowne = "".join(digits)
    return (
        int(champernowne[0])
        * int(champernowne[9])
        * int(champernowne[99])
        * int(champernowne[999])
        * int(champernowne[9999])
        * int(champernowne[99999])
        * int(champernowne[999999])
    )
if __name__ == "__main__":
    # Print the digit product of Champernowne's constant (Project Euler 40).
    print(solution())
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the ConvNext sub-package. Names restored: the
# obfuscated original bound every assignment to ``__a``, so the optional
# entries never reached the ``_import_structure`` dict that the final
# ``_LazyModule`` call reads, and the module was never installed into
# ``sys.modules``.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports are only evaluated for type checkers.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # framework-specific submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 210
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared image fixtures for the digital_image_processing tests.
# NOTE(review): obfuscation bound both fixtures to ``A`` while the second line
# reads ``img``, and every test body references ``_UpperCamelCase`` /
# ``__lowerCAmelCase`` — confirm the intended fixture wiring (``img`` /
# ``gray_img``) against the upstream test module.
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)


def _lowerCamelCase ( ):
    '''Negative conversion should yield at least one nonzero pixel.'''
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()


def _lowerCamelCase ( ):
    '''change_contrast should return a 100x100 RGB PIL image.'''
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )


def _lowerCamelCase ( ):
    '''A generated Gaussian kernel should contain only nonzero entries.'''
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def _lowerCamelCase ( ):
    '''Canny edge detection should produce at least one edge pixel.'''
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()


def _lowerCamelCase ( ):
    '''Gaussian filtering should return a fully populated array.'''
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()


def _lowerCamelCase ( ):
    '''Convolution with a Laplacian-like kernel should yield nonzero output.'''
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()


def _lowerCamelCase ( ):
    '''Median filtering should yield nonzero output.'''
    assert med.median_filter(_UpperCamelCase , 3 ).any()


def _lowerCamelCase ( ):
    '''Sobel filtering should return nonzero gradient and angle arrays.'''
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()


def _lowerCamelCase ( ):
    '''Sepia conversion should return a fully populated array.'''
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()


def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    '''Burkes dithering should produce a nonzero output image.'''
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()


def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''Nearest-neighbour resize to 400x200 should produce nonzero output.'''
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()


def _lowerCamelCase ( ):
    '''Local binary pattern: neighbour lookup and full-image LBP map.'''
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_snake_case : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class a (datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    NOTE(review): field names restored from their usages in the builder
    below (``self.config.features``, ``.encoding``, ``.encoding_errors``,
    ``.field``, ``.use_threads``, ``.block_size``, ``.chunksize``,
    ``.newlines_in_values``); the obfuscated original gave every field the
    same name, so only one survived.
    """

    # Optional explicit feature schema; inferred from the data when None.
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    # Error handling passed to open(); None means the default "strict".
    encoding_errors: Optional[str] = None
    # When set, parse only the list found under this top-level JSON field.
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
__UpperCAmelCase : Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads JSON / JSON-Lines files into pyarrow tables."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            # Map the deprecated option onto its replacement.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/extract the configured data files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        # A bare str/list/tuple means "everything is the train split".
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a pyarrow table to the configured feature schema, adding missing columns as null."""
        if self.config.features is not None:
            # Add any column the schema expects but the file lacks, filled with nulls.
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # More expensive cast to support nested structures with keys in a different order
            # (allows str <-> int/float or str to Audio for example).
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs for each input file, batched by `chunksize`."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # Otherwise the file has one json object per line (JSON-Lines)
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use a block_size equal to chunksize // 32 to leverage multithreading,
                    # with a minimum of 16kB for very small chunk sizes.
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish the current line so a record is never split across batches.
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes.
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small;
                                        # it is reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # pyarrow failed outright: fall back to the stdlib json parser.
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 123
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the AltCLIP model: submodule name -> public names.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: expose the modeling classes as well.
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static analysis / type checkers see the real imports.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort `collection` in place using circle sort and return it.

    Circle sort repeatedly compares and swaps elements at mirrored
    positions, then recurses on both halves, until a full pass makes
    no swap.

    >>> circle_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One recursive circle-sort pass over collection[low:high+1]; True if anything was swapped."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        # Compare mirrored pairs moving inwards.
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # Odd-length middle element: compare with its right neighbour.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 55
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for seq2seq fine-tuning, extending `TrainingArguments`.

    NOTE(review): field names and defaults reconstructed from the metadata
    help strings; the automated renaming had collapsed every field to one
    name with an undefined default — confirm against the original script.
    """

    # Label smoothing epsilon; 0.0 disables smoothing.
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    # The dropout overrides below are copied into model.config when set.
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."})
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}, )
| 57
| 0
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

# Model and mixed-precision constants used by the test classes below.
# Names `FPaa`/`BFaa` kept to match existing references in this file.
BERT_BASE_CASED = "bert-base-cased"
FPaa = "fp16"
BFaa = "bf16"
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
    """Unit tests mapping FSDP environment variables to `FullyShardedDataParallelPlugin` state.

    NOTE(review): automated renaming has damaged this class. Every local is
    assigned to the same name `UpperCamelCase` (so env-dict mutations and the
    `self.dist_env` binding built in setUp are discarded), `__a` is referenced
    but never defined, and all four test methods share one name and clobber
    each other. Documented as-is; do not treat the control flow as runnable.
    """

    def lowerCamelCase_ ( self : str ):
        """Build a minimal single-process FSDP environment dict.

        NOTE(review): presumably this dict was stored as `self.dist_env`
        (later methods read `self.dist_env.copy()`) — binding lost.
        """
        super().setUp()
        UpperCamelCase = dict(
            ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Check each sharding-strategy env value yields the matching ShardingStrategy enum."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        # NOTE(review): `__a` here was presumably FSDP_SHARDING_STRATEGY.
        for i, strategy in enumerate(__a ):
            UpperCamelCase = self.dist_env.copy()
            UpperCamelCase = f"""{i + 1}"""
            UpperCamelCase = strategy
            with mockenv_context(**__a ):
                UpperCamelCase = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Check backward-prefetch env values map to BackwardPrefetch (or None for NO_PREFETCH)."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(__a ):
            UpperCamelCase = self.dist_env.copy()
            UpperCamelCase = prefetch_policy
            with mockenv_context(**__a ):
                UpperCamelCase = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Check state-dict-type env values map to StateDictType; FULL_STATE_DICT also sets CPU offload flags."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(__a ):
            UpperCamelCase = self.dist_env.copy()
            UpperCamelCase = state_dict_type
            with mockenv_context(**__a ):
                UpperCamelCase = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    # NOTE(review): `ranka_only` looks like a mangled `rank0_only` — confirm.
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Exercise set_auto_wrap_policy for each wrap policy, including failure and no-op cases."""
        UpperCamelCase = AutoModel.from_pretrained(__a )
        for policy in FSDP_AUTO_WRAP_POLICY:
            UpperCamelCase = self.dist_env.copy()
            UpperCamelCase = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                UpperCamelCase = """BertLayer"""
            elif policy == "SIZE_BASED_WRAP":
                UpperCamelCase = """2000"""
            with mockenv_context(**__a ):
                UpperCamelCase = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(__a )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        # A transformer layer class that does not exist in the model must raise.
        UpperCamelCase = self.dist_env.copy()
        UpperCamelCase = """TRANSFORMER_BASED_WRAP"""
        UpperCamelCase = """T5Layer"""
        with mockenv_context(**__a ):
            UpperCamelCase = FullyShardedDataParallelPlugin()
            with self.assertRaises(__a ) as cm:
                fsdp_plugin.set_auto_wrap_policy(__a )
            self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
        # min_num_params=0 effectively disables size-based wrapping.
        UpperCamelCase = self.dist_env.copy()
        UpperCamelCase = """SIZE_BASED_WRAP"""
        UpperCamelCase = """0"""
        with mockenv_context(**__a ):
            UpperCamelCase = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(__a )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )

    def lowerCamelCase_ ( self : Any ):
        """Check mixed-precision env values configure MixedPrecision and the grad scaler."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            UpperCamelCase = self.dist_env.copy()
            UpperCamelCase = mp_dtype
            with mockenv_context(**__a ):
                UpperCamelCase = Accelerator()
                # NOTE(review): `torch.floataa`/`torch.bfloataa` look like mangled
                # `torch.float16`/`torch.bfloat16` — confirm.
                if mp_dtype == "fp16":
                    UpperCamelCase = torch.floataa
                elif mp_dtype == "bf16":
                    UpperCamelCase = torch.bfloataa
                UpperCamelCase = MixedPrecision(param_dtype=__a , reduce_dtype=__a , buffer_dtype=__a )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __a )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , __a ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
            AcceleratorState._reset_state(__a )

    def lowerCamelCase_ ( self : Tuple ):
        """Check the FSDP CPU-offload flag round-trips through the environment."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            UpperCamelCase = self.dist_env.copy()
            UpperCamelCase = str(__a ).lower()
            with mockenv_context(**__a ):
                UpperCamelCase = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__a ) )
@require_fsdp
@require_multi_gpu
@slow
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
    """Slow multi-GPU integration tests launching FSDP training scripts via `accelerate launch`.

    NOTE(review): same obfuscation damage as the class above — repeated
    assignments to `UpperCamelCase` discard the setUp attribute bindings
    (`self.performance_lower_bound`, `self.performance_configs`,
    `self.peak_memory_usage_upper_bound`, `self.n_train`, `self.n_val`,
    `self.test_scripts_folder`), and `__a` is undefined. Documented as-is.
    """

    def lowerCamelCase_ ( self : int ):
        """Define thresholds, configs and the external-deps scripts folder for the launch tests."""
        super().setUp()
        UpperCamelCase = 0.8_2
        UpperCamelCase = [
            """fsdp_shard_grad_op_transformer_based_wrap""",
            """fsdp_full_shard_transformer_based_wrap""",
        ]
        # Peak-memory upper bounds (MB) per launch configuration.
        UpperCamelCase = {
            """multi_gpu_fp16""": 3200,
            """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
            """fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        UpperCamelCase = 160
        UpperCamelCase = 160
        UpperCamelCase = inspect.getfile(accelerate.test_utils )
        UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )

    def lowerCamelCase_ ( self : str ):
        """Launch test_performance.py under each performance config and FSDP flag combination."""
        UpperCamelCase = os.path.join(self.test_scripts_folder , """test_performance.py""" )
        UpperCamelCase = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
        for config in self.performance_configs:
            UpperCamelCase = cmd.copy()
            # Translate the config name into a --fsdp_sharding_strategy index.
            for i, strategy in enumerate(__a ):
                if strategy.lower() in config:
                    cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
                    break
            if "fp32" in config:
                cmd_config.append("""--mixed_precision=no""" )
            else:
                cmd_config.append("""--mixed_precision=fp16""" )
            if "cpu_offload" in config:
                cmd_config.append("""--fsdp_offload_params=True""" )
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("""--fsdp_min_num_params=2000""" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--performance_lower_bound={self.performance_lower_bound}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__a , env=os.environ.copy() )

    def lowerCamelCase_ ( self : List[str] ):
        """Launch test_checkpointing.py for FULL_SHARD with every state-dict type, then resume."""
        UpperCamelCase = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
        UpperCamelCase = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
            """--use_fsdp""",
            """--mixed_precision=fp16""",
            """--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
        ]
        for i, strategy in enumerate(__a ):
            UpperCamelCase = cmd.copy()
            cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
            # Only FULL_SHARD is exercised for checkpointing.
            if strategy != "FULL_SHARD":
                continue
            UpperCamelCase = len(__a )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                # Rebuild the command up to the state-dict flag for each type.
                UpperCamelCase = cmd_config[:state_dict_config_index]
                cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"""--output_dir={self.tmpdir}""",
                        """--partial_train_epoch=1""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__a , env=os.environ.copy() )
                # Second run: resume from the checkpoint written by the first.
                UpperCamelCase = cmd_config[:-1]
                UpperCamelCase = os.path.join(self.tmpdir , """epoch_0""" )
                cmd_config.extend(
                    [
                        f"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__a , env=os.environ.copy() )

    def lowerCamelCase_ ( self : str ):
        """Launch test_peak_memory_usage.py and verify memory stays under the per-config bound."""
        UpperCamelCase = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
        UpperCamelCase = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            UpperCamelCase = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["""--mixed_precision=fp16"""] )
            else:
                cmd_config.extend(["""--mixed_precision=no"""] )
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["""--use_fsdp"""] )
                for i, strategy in enumerate(__a ):
                    if strategy.lower() in spec:
                        cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("""--fsdp_offload_params=True""" )
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("""--fsdp_min_num_params=2000""" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    f"""--n_train={self.n_train}""",
                    f"""--n_val={self.n_val}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__a , env=os.environ.copy() )
| 343
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# Paths and regex patterns used to rewrite version strings across the repo.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex that finds the version line, replacement template)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file that carries the canonical version string.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the placeholder in the replacement template first.
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the `check_min_version` call in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it appears; skip the examples for a patch release."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main`-branch doc links with release links in the README model list."""
    # Delimiters of the model list inside the README.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the package `__init__.py` and parse it."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the release version, confirm it interactively, and apply it everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        # Dropping the .dev0 suffix gives the release version.
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Bump the version to the next dev release after a release has shipped."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        # A patch release needs no post-release version bump.
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the PoolFormer model: submodule name -> public names.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision backend available: expose the (deprecated) feature extractor and image processor.
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analysis / type checkers see the real imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 199
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the Blenderbot model: submodule name -> public names.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analysis / type checkers see the real imports.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
UpperCamelCase__ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
UpperCamelCase__ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, 
average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
UpperCamelCase__ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    # F1 metric backed by scikit-learn's f1_score.
    # NOTE(review): both methods share the name `lowercase_` (the second shadows
    # the first), the compute method declares six parameters all named `_A`
    # (a SyntaxError), and its body references `fa_score` / `__a` / `score`,
    # none of which are bound here — mangled copy of the `datasets` F1 metric;
    # restore from the original before use.
    def lowercase_ ( self : str ):
        '''Build the MetricInfo; feature types depend on the multilabel config.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )

    def lowercase_ ( self : Dict , _A : Optional[int] , _A : Optional[int] , _A : Tuple=None , _A : Any=1 , _A : Tuple="binary" , _A : int=None ):
        '''Compute the F1 score of predictions against references.'''
        UpperCAmelCase__ : Tuple = fa_score(
            __a , __a , labels=__a , pos_label=__a , average=__a , sample_weight=__a )
        return {"f1": float(__a ) if score.size == 1 else score}
| 181
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ):
'''simple docstring'''
try:
__lowerCAmelCase = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__lowerCAmelCase = 2
__lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCAmelCase = i
while n % i == 0:
__lowerCAmelCase = n // i
i += 1
return int(_UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 57
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a :Tuple = logging.get_logger(__name__)

# NOTE(review): every module-level constant below is bound to the same mangled
# name `a`, so each assignment clobbers the previous one. Originally these were
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / the BPE markers.
a :Optional[Any] = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
a :str = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# End-of-word marker and merge-continuation prefix used by the BPE logic below.
a :str = "</w>"
a :Dict = "@@ "
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = set()
SCREAMING_SNAKE_CASE__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : Optional[int] = char
return pairs
# Speech2Text2 has no max input length
# (max positional-embedding size per pretrained checkpoint)
a :Any = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class __a (lowerCAmelCase__):
    '''Speech2Text2 BPE tokenizer: decodes always; encodes only when a merges file is given.

    NOTE(review): this is an obfuscation-damaged copy. Parameters are declared
    as `_a` (duplicated — a SyntaxError in `__init__` and `save_vocabulary`),
    bodies reference `__a` and names (`merges_file`, `word`, `token`, ...) that
    the mangled assignments never bind, and every method is named `_a` so later
    definitions shadow earlier ones. Compare against the original
    Speech2Text2Tokenizer before trusting any behavior described below.
    '''

    _SCREAMING_SNAKE_CASE :Optional[int] = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE :str = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE :Dict = ["""input_ids""", """attention_mask"""]

    def __init__( self , _a , _a="<s>" , _a="<pad>" , _a="</s>" , _a="<unk>" , _a=False , _a=None , **_a , ) -> Optional[Any]:
        """Load the vocab (and optional merges) files; without merges only decoding works."""
        super().__init__(
            unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , do_lower_case=__a , **__a , )
        SCREAMING_SNAKE_CASE__ : List[str] = do_lower_case
        with open(__a , encoding="""utf-8""" ) as vocab_handle:
            SCREAMING_SNAKE_CASE__ : Any = json.load(__a )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            SCREAMING_SNAKE_CASE__ : List[str] = None
            SCREAMING_SNAKE_CASE__ : str = None
        else:
            with open(__a , encoding="""utf-8""" ) as merges_handle:
                SCREAMING_SNAKE_CASE__ : Any = merges_handle.read().split("""\n""" )[:-1]
            SCREAMING_SNAKE_CASE__ : Any = [tuple(merge.split()[:2] ) for merge in merges]
            SCREAMING_SNAKE_CASE__ : int = dict(zip(__a , range(len(__a ) ) ) )
            SCREAMING_SNAKE_CASE__ : List[str] = {}

    @property
    def _a ( self ) -> Tuple:
        """Vocabulary size: length of the id->token map."""
        return len(self.decoder )

    def _a ( self ) -> Any:
        """Return the full vocabulary including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _a ( self , _a ) -> List[Any]:
        """Apply byte-pair-encoding merges to a single token (result cached)."""
        SCREAMING_SNAKE_CASE__ : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = get_pairs(__a )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (earliest-learned) mergeable pair.
            SCREAMING_SNAKE_CASE__ : int = min(__a , key=lambda _a : self.bpe_ranks.get(__a , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = bigram
            SCREAMING_SNAKE_CASE__ : List[Any] = []
            SCREAMING_SNAKE_CASE__ : Any = 0
            while i < len(__a ):
                try:
                    SCREAMING_SNAKE_CASE__ : Optional[int] = word.index(__a , __a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    SCREAMING_SNAKE_CASE__ : Dict = j
                if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            SCREAMING_SNAKE_CASE__ : Tuple = tuple(__a )
            SCREAMING_SNAKE_CASE__ : Dict = new_word
            if len(__a ) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE__ : Optional[Any] = get_pairs(__a )
        SCREAMING_SNAKE_CASE__ : Any = """ """.join(__a )
        if word == "\n " + BPE_TOKEN_MERGES:
            SCREAMING_SNAKE_CASE__ : List[Any] = """\n""" + BPE_TOKEN_MERGES
        if word.endswith(__a ):
            SCREAMING_SNAKE_CASE__ : Optional[int] = word.replace(__a , """""" )
        SCREAMING_SNAKE_CASE__ : List[str] = word.replace(""" """ , __a )
        SCREAMING_SNAKE_CASE__ : Tuple = word
        return word

    def _a ( self , _a ) -> Optional[Any]:
        """Split text into BPE sub-tokens (requires a merges file)."""
        if self.bpe_ranks is None:
            raise ValueError(
                """This tokenizer was instantiated without a `merges.txt` file, so"""
                """ that it can only be used for decoding, not for encoding."""
                """Make sure to provide `merges.txt` file at instantiation to enable """
                """encoding.""" )
        if self.do_lower_case:
            SCREAMING_SNAKE_CASE__ : List[Any] = text.lower()
        SCREAMING_SNAKE_CASE__ : Optional[int] = text.split()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__a ).split(""" """ ) ) )
        return split_tokens

    def _a ( self , _a ) -> Any:
        """Map a token string to its id (unk id for out-of-vocabulary tokens)."""
        return self.encoder.get(__a , self.encoder.get(self.unk_token ) )

    def _a ( self , _a ) -> str:
        """Map an id back to its token string (unk token for unknown ids)."""
        SCREAMING_SNAKE_CASE__ : Any = self.decoder.get(__a , self.unk_token )
        return result

    def _a ( self , _a ) -> List[str]:
        """Join tokens into a plain string, fusing '@@ ' continuation markers."""
        SCREAMING_SNAKE_CASE__ : str = """ """.join(__a )
        # make sure @@ tokens are concatenated
        SCREAMING_SNAKE_CASE__ : Any = """""".join(string.split(__a ) )
        return string

    def _a ( self , _a , _a = None ) -> str:
        """Write vocab.json (and merges.txt when available) into the save directory."""
        if not os.path.isdir(__a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        SCREAMING_SNAKE_CASE__ : int = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(__a , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + """\n""" )
        SCREAMING_SNAKE_CASE__ : List[str] = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__a , """w""" , encoding="""utf-8""" ) as writer:
            # Merges must be written in rank order; warn if ranks are not consecutive.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    SCREAMING_SNAKE_CASE__ : str = token_index
                writer.write(""" """.join(__a ) + """\n""" )
                index += 1
        return (vocab_file, merges_file)
| 132
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Helper that exercises a config class: common properties, JSON round-trips,
    save/load (incl. subfolder), num_labels handling and kwarg initialization.

    NOTE(review): obfuscation-damaged copy. `__init__` declares several
    parameters all named `__a` (a SyntaxError) and the method bodies reference
    names (`config`, `obj`, `config_first`, `wrong_values`, ...) that the
    mangled `__lowerCAmelCase = ...` assignments never bind. Every test method
    is named `snake_case`, so later definitions shadow earlier ones.
    '''

    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties

    def snake_case ( self ):
        # Checks getters, setters and kwarg-init for the common config fields.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def snake_case ( self ):
        # JSON-string round-trip: every input kwarg must survive serialization.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )

    def snake_case ( self ):
        # JSON-file round-trip via to_json_file / from_json_file.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        # save_pretrained / from_pretrained round-trip.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        # save/load round-trip through a subfolder.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        # Changing num_labels must resize the id/label mappings.
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def snake_case ( self ):
        # Non-composite configs must be constructible with no arguments.
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )

    def snake_case ( self ):
        # Every common kwarg must be stored verbatim on the config instance.
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )

    def snake_case ( self ):
        # Run the full battery of checks above.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 0
|
"""simple docstring"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
    '''Run one candidate program in a subprocess with a timeout and report pass/fail.

    NOTE(review): the four parameters are all named `UpperCamelCase__` (a
    SyntaxError) and the body references unbound names (`_UpperCamelCase` as
    the process target, `manager`, `check_program`, `timeout`, `task_id`,
    `completion_id`) — mangled copy of a HumanEval-style harness.
    '''
    _a : Union[str, Any] = multiprocessing.Manager()
    # Shared list the child process appends its outcome to.
    _a : Any = manager.list()
    _a : Any = multiprocessing.Process(target=_UpperCamelCase , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append("""timed out""" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
    '''Execute untrusted code inside a guarded temp dir, appending the outcome to a result list.

    NOTE(review): duplicate `UpperCamelCase__` parameters (SyntaxError) and
    unbound body names (`reliability_guard`, `swallow_io`, `time_limit`,
    `_UpperCamelCase`, `result`, `rmtree`, `rmdir`, `chdir`) — mangled copy;
    restore from the original harness before use.
    '''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        _a : Tuple = shutil.rmtree
        _a : Tuple = os.rmdir
        _a : Dict = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            _a : Dict = {}
            with swallow_io():
                with time_limit(_UpperCamelCase ):
                    exec(_UpperCamelCase , _UpperCamelCase )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            result.append(F"""failed: {e}""" )
        # Needed for cleaning up.
        _a : Optional[Any] = rmtree
        _a : List[str] = rmdir
        _a : Dict = chdir
@contextlib.contextmanager
def lowerCAmelCase__ ( UpperCamelCase__ ):
    '''Raise TimeoutException if the wrapped block runs longer than the given seconds.

    NOTE(review): the inner handler duplicates its parameter name (SyntaxError)
    and `_UpperCamelCase` is unbound where the timeout/handler should be passed.
    '''
    def signal_handler(UpperCamelCase__ , UpperCamelCase__ ):
        raise TimeoutException("""Timed out!""" )

    signal.setitimer(signal.ITIMER_REAL , _UpperCamelCase )
    signal.signal(signal.SIGALRM , _UpperCamelCase )
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def lowerCAmelCase__ ( ):
    """Context manager that discards stdout/stderr and blocks stdin reads.

    All three standard streams are redirected to a single write-only buffer
    for the duration of the ``with`` block.
    """
    # Bug fix: the mangled original created the sink but then passed the
    # unbound name `_UpperCamelCase` to the redirect helpers.
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def lowerCAmelCase__ ( ):
    """Create a temporary directory, chdir into it for the duration, and yield its path.

    The directory (and everything created inside it) is removed on exit.
    """
    with tempfile.TemporaryDirectory() as dirname:
        # Bug fix: the mangled original passed the unbound name `_UpperCamelCase`
        # to the chdir context manager instead of the new directory.
        with chdir(dirname ):
            yield dirname
class UpperCamelCase ( lowerCAmelCase__ ):
    # Exception used to abort execution when the time limit fires.
    # NOTE(review): the base `lowerCAmelCase__` resolves to a function in this
    # mangled copy; originally this subclassed `Exception` as TimeoutException.
    pass
class UpperCamelCase ( io.StringIO ):
    '''A StringIO that accepts writes but refuses reads (sink for stdout/stderr).

    NOTE(review): all four methods share the mangled name `_lowercase`
    (originally read / readline / readlines / readable), so only the last
    definition — the one returning False — survives on the class.
    '''

    def _lowercase ( self : Union[str, Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any] ) -> Any:
        raise OSError

    def _lowercase ( self : List[Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Union[str, Any]:
        raise OSError

    def _lowercase ( self : Union[str, Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : str ) -> Optional[int]:
        raise OSError

    def _lowercase ( self : Tuple , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Dict ) -> Tuple:
        return False
class UpperCamelCase ( contextlib._RedirectStream ):  # type: ignore
    # Redirects sys.stdin inside a with-block.
    # NOTE(review): the class attribute should be `_stream = "stdin"` for
    # `_RedirectStream` to work; the attribute name was mangled.
    UpperCamelCase : Dict = """stdin"""
@contextlib.contextmanager
def lowerCAmelCase__ ( UpperCamelCase__ ):
    """Temporarily change the working directory; restore the previous one on exit.

    Args:
        UpperCamelCase__: target directory path; ``"."`` is a no-op fast path.
    """
    # Bug fix: the mangled original tested the unbound name `root` and, on
    # exit, chdir'd back to another unbound name instead of the saved cwd.
    if UpperCamelCase__ == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(UpperCamelCase__ )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        # Always restore the original working directory.
        os.chdir(cwd )
def lowerCAmelCase__ ( UpperCamelCase__=None ):
    '''Apply resource limits and (intended) API neutering before running untrusted code.

    NOTE(review): `maximum_memory_bytes` is unbound (the parameter was mangled
    to `UpperCamelCase__`), and every "disable" below assigns a throwaway local
    `_a` instead of patching the real builtins/os/shutil/subprocess/sys
    attributes, so effectively only faulthandler.disable() (and, if reached,
    the rlimit calls) take effect. Restore from the original sandbox guard.
    '''
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins

    _a : Optional[Any] = None
    _a : Optional[int] = None
    import os

    # Originally set OMP_NUM_THREADS = "1"; here it binds a local instead.
    _a : Union[str, Any] = """1"""
    _a : Dict = None
    _a : Union[str, Any] = None
    _a : List[Any] = None
    _a : Union[str, Any] = None
    _a : Any = None
    _a : List[str] = None
    _a : Union[str, Any] = None
    _a : Optional[int] = None
    _a : Any = None
    _a : Tuple = None
    _a : str = None
    _a : Optional[Any] = None
    _a : Union[str, Any] = None
    _a : str = None
    _a : Optional[Any] = None
    _a : Optional[int] = None
    _a : List[str] = None
    _a : Any = None
    _a : str = None
    _a : int = None
    _a : Any = None
    _a : List[str] = None
    _a : Tuple = None
    _a : Union[str, Any] = None
    _a : List[str] = None
    _a : Optional[int] = None
    _a : Optional[Any] = None
    import shutil

    _a : Union[str, Any] = None
    _a : str = None
    _a : Any = None
    import subprocess

    _a : Optional[int] = None  # type: ignore
    _a : Union[str, Any] = None
    import sys

    _a : Dict = None
    _a : Any = None
    _a : List[Any] = None
    _a : Dict = None
    _a : List[str] = None
| 294
|
"""simple docstring"""
# Pinned dependency specifiers keyed by package name (diffusers-style deps table,
# consumed by setup tooling to build extras/install_requires).
A : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 0
|
"""simple docstring"""
import baseaa
def _snake_case ( lowercase__ ):
    """Encode a text string to Ascii85 bytes (via the mangled `baseaa.aaaencode` alias).

    The input is UTF-8 encoded first.
    """
    # Bug fix: the mangled original called `.encode` on the unbound global
    # `string` instead of the parameter.
    return baseaa.aaaencode(lowercase__.encode('utf-8' ) )
def _snake_case ( lowercase__ ):
    """Decode Ascii85 bytes back to a UTF-8 text string.

    NOTE(review): this definition shadows the encode helper above, which shares
    the same mangled name.
    """
    # Bug fix: the mangled original decoded the unbound name `_UpperCamelCase`
    # instead of the parameter.
    return baseaa.aaadecode(lowercase__ ).decode('utf-8' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 96
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
super().__init__(
eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
__lowerCAmelCase = extra_ids
__lowerCAmelCase = 2**8 # utf is 8 bits
# define special tokens dict
__lowerCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__lowerCAmelCase = len(self.special_tokens_encoder )
__lowerCAmelCase = len(__a )
for i, token in enumerate(__a ):
__lowerCAmelCase = self.vocab_size + i - n
__lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a )) + [1]
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def snake_case ( self , __a ):
if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = self._add_eos_if_not_present(__a )
if token_ids_a is None:
return token_ids_a
else:
__lowerCAmelCase = self._add_eos_if_not_present(__a )
return token_ids_a + token_ids_a
def snake_case ( self , __a ):
__lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
return tokens
def snake_case ( self , __a ):
if token in self.special_tokens_encoder:
__lowerCAmelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__lowerCAmelCase = self.added_tokens_encoder[token]
elif len(__a ) != 1:
__lowerCAmelCase = self.unk_token_id
else:
__lowerCAmelCase = ord(__a ) + self._num_special_tokens
return token_id
def snake_case ( self , __a ):
if index in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[index]
else:
__lowerCAmelCase = chr(index - self._num_special_tokens )
return token
def snake_case ( self , __a ):
__lowerCAmelCase = B""
for token in tokens:
if token in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
else:
__lowerCAmelCase = bytes([ord(__a )] )
bstring += tok_string
__lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
return string
def snake_case ( self , __a , __a = None ):
return ()
| 57
| 0
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs, keyed by model size
# (the SHA-256 checksum is embedded in each URL path).
_snake_case = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def lowerCAmelCase_ ( snake_case_ ):
    """Drop top-level bookkeeping entries from a state dict, in place.

    Args:
        snake_case_: the checkpoint state dict (mutated; nothing returned).
    """
    # Bug fix: the mangled original bound the key list to `_A` but iterated
    # the unbound name `ignore_keys`, and popped unbound names from an unbound
    # `state_dict`.
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        # pop with a default so absent keys are not an error
        snake_case_.pop(k , None )
# OpenAI -> Hugging Face parameter-name substitutions applied to state-dict keys.
# NOTE(review): rebinding `_snake_case` clobbers the checkpoint-URL dict above;
# the rename helper below expects this table under the name `WHISPER_MAPPING`.
_snake_case = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def lowerCAmelCase_ ( snake_case_ ):
    '''Rename Whisper state-dict keys to the HF naming scheme, in place.

    NOTE(review): `WHISPER_MAPPING`, `s_dict`, `keys`, `key`, `new_key` and
    `_UpperCamelCase` are unbound in this mangled copy (the mapping dict above
    was itself renamed to `_snake_case`); restore from the original script.
    '''
    _A : str = list(s_dict.keys() )
    for key in keys:
        _A : Any = key
        # Apply every matching substring substitution from the mapping table.
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                _A : Dict = new_key.replace(_UpperCamelCase,_UpperCamelCase )
        print(f'''{key} -> {new_key}''' )
        _A : Union[str, Any] = s_dict.pop(_UpperCamelCase )
    return s_dict
def lowerCAmelCase_ ( snake_case_ ):
    """Build a bias-free ``nn.Linear`` that shares the weights of an embedding layer.

    Used to tie the decoder output projection to the token embedding.

    Args:
        snake_case_: an ``nn.Embedding`` (or any module with a 2-D ``weight``).

    Returns:
        ``nn.Linear`` whose ``weight`` is the embedding's weight tensor.
    """
    # Bug fix: the mangled original unpacked the shape into `_A , _A` and then
    # constructed the layer from unbound names.
    vocab_size, emb_size = snake_case_.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    # Share (not copy) the embedding weights so the projection stays tied.
    lin_layer.weight.data = snake_case_.weight.data
    return lin_layer
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    '''Download a checkpoint into a root directory, verifying its SHA-256 checksum.

    NOTE(review): duplicate `snake_case_` parameters (SyntaxError) and unbound
    body names (`_UpperCamelCase`, `url`, `expected_shaaaa`, `download_target`,
    `model_bytes`, `buffer`) — mangled copy of the openai/whisper `_download`
    helper; restore from the original before use.
    '''
    os.makedirs(_UpperCamelCase,exist_ok=_UpperCamelCase )
    _A : Union[str, Any] = os.path.basename(_UpperCamelCase )
    # The expected SHA-256 is the second-to-last URL path component.
    _A : List[str] = url.split("""/""" )[-2]
    _A : int = os.path.join(_UpperCamelCase,_UpperCamelCase )
    if os.path.exists(_UpperCamelCase ) and not os.path.isfile(_UpperCamelCase ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(_UpperCamelCase ):
        # Reuse a cached file if its checksum matches, otherwise re-download.
        _A : Optional[int] = open(_UpperCamelCase,"""rb""" ).read()
        if hashlib.shaaaa(_UpperCamelCase ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(_UpperCamelCase ) as source, open(_UpperCamelCase,"""wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ),ncols=80,unit="""iB""",unit_scale=_UpperCamelCase,unit_divisor=1024 ) as loop:
            while True:
                _A : Union[str, Any] = source.read(8192 )
                if not buffer:
                    break
                output.write(_UpperCamelCase )
                loop.update(len(_UpperCamelCase ) )
    _A : Union[str, Any] = open(_UpperCamelCase,"""rb""" ).read()
    if hashlib.shaaaa(_UpperCamelCase ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
    return model_bytes
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    '''Convert an OpenAI Whisper checkpoint into a HF WhisperForConditionalGeneration.

    NOTE(review): duplicate `snake_case_` parameters (SyntaxError) and many
    unbound body names (`checkpoint_path`, `original_checkpoint`, `state_dict`,
    `dimensions`, `model`, `missing`, `tie_embeds`, `proj_out_weights`, ...) —
    mangled copy of the HF conversion script; restore from the original.
    '''
    if ".pt" not in checkpoint_path:
        # A bare size name ("tiny", "base", ...) is resolved via the URL table.
        _A : Union[str, Any] = _download(_MODELS[checkpoint_path] )
    else:
        _A : List[str] = torch.load(_UpperCamelCase,map_location="""cpu""" )
    _A : Optional[int] = original_checkpoint["""dims"""]
    _A : int = original_checkpoint["""model_state_dict"""]
    _A : Dict = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(_UpperCamelCase )
    rename_keys(_UpperCamelCase )
    _A : Dict = True
    _A : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    _A : Optional[Any] = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""],encoder_ffn_dim=_UpperCamelCase,decoder_ffn_dim=_UpperCamelCase,num_mel_bins=dimensions["""n_mels"""],d_model=dimensions["""n_audio_state"""],max_target_positions=dimensions["""n_text_ctx"""],encoder_layers=dimensions["""n_audio_layer"""],encoder_attention_heads=dimensions["""n_audio_head"""],decoder_layers=dimensions["""n_text_layer"""],decoder_attention_heads=dimensions["""n_text_state"""],max_source_positions=dimensions["""n_audio_ctx"""],)
    _A : int = WhisperForConditionalGeneration(_UpperCamelCase )
    _A , _A : int = model.model.load_state_dict(_UpperCamelCase,strict=_UpperCamelCase )
    # Only the sinusoidal position tables may legitimately be absent.
    if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        _A : int = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        _A : List[str] = proj_out_weights
    model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): `parser`, `args` and `convert_openai_whisper_to_tfms` are
    # unbound in this mangled copy (everything above was renamed).
    _snake_case = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    _snake_case = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 26
|
"""simple docstring"""
import numpy
# List of input, output pairs
A : Any = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
A : Union[str, Any] = [2, 4, 1, 5]
A : int = len(train_data)
A : Dict = 0.009
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase="train" ):
    '''Prediction error (hypothesis minus actual output) for one example.

    NOTE(review): both parameters share the name `_UpperCamelCase` (SyntaxError)
    and the called helpers were all mangled to `_lowerCamelCase` as well.
    '''
    return calculate_hypothesis_value(_UpperCamelCase , _UpperCamelCase ) - output(
        _UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
    '''Linear hypothesis: dot product of the feature tuple with the global
    parameter vector, plus the bias term parameter_vector[0].

    NOTE(review): the body reads `hyp_val` / `data_input_tuple`, which the
    mangled `__lowerCAmelCase = 0` assignment never binds.
    '''
    __lowerCAmelCase = 0
    for i in range(len(_UpperCamelCase ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    '''Actual output value of example *example_no* from the train or test set.

    NOTE(review): duplicate `_UpperCamelCase` parameters (SyntaxError); the
    body reads unbound `data_set` / `example_no` / `train_data` / `test_data`.
    '''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    '''Hypothesis value for example *example_no* of the chosen data set.

    NOTE(review): duplicate `_UpperCamelCase` parameters (SyntaxError); the
    body reads unbound `data_set` / `example_no` / `_hypothesis_value`.
    '''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=m ):
    '''Sum of per-example errors over the training set, weighted by the feature
    at *index* (index == -1 means the bias term, i.e. unweighted).

    NOTE(review): duplicate `_UpperCamelCase` parameters (SyntaxError); the
    body reads unbound `index` / `summation_value` / `_error`.
    '''
    __lowerCAmelCase = 0
    for i in range(_UpperCamelCase ):
        if index == -1:
            summation_value += _error(_UpperCamelCase )
        else:
            summation_value += _error(_UpperCamelCase ) * train_data[i][0][index]
    return summation_value
def _lowerCamelCase ( _UpperCamelCase ):
    '''Cost derivative for one parameter index: mean of the weighted error sum.

    NOTE(review): `summation_of_cost_derivative` and `cost_derivative_value`
    are unbound here — both were mangled away from their original names.
    '''
    __lowerCAmelCase = summation_of_cost_derivative(_UpperCamelCase , _UpperCamelCase ) / m
    return cost_derivative_value
def _lowerCamelCase ( ):
    '''Batch gradient descent: update all parameters each iteration until the
    updates fall within the absolute/relative tolerance.

    NOTE(review): the body reads `j`, `temp_parameter_vector`,
    `cost_derivative` and `get_cost_derivative`, which the mangled
    `__lowerCAmelCase` assignments never bind.
    '''
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    __lowerCAmelCase = 0.00_00_02
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    while True:
        j += 1
        __lowerCAmelCase = [0, 0, 0, 0]
        for i in range(0 , len(_UpperCamelCase ) ):
            # i - 1 maps parameter 0 to the bias index (-1).
            __lowerCAmelCase = get_cost_derivative(i - 1 )
            __lowerCAmelCase = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            _UpperCamelCase , _UpperCamelCase , atol=_UpperCamelCase , rtol=_UpperCamelCase , ):
            break
        __lowerCAmelCase = temp_parameter_vector
    print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
    """Print actual vs. predicted output for every example in ``test_data``."""
    # NOTE(review): the original iterated over an undefined ``_UpperCamelCase``
    # and passed it as the example index; restored iteration over test_data.
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
    # NOTE(review): ``run_gradient_descent`` and ``test_gradient_descent`` are
    # not bound under these names in this file (the defs above are all
    # obfuscated to ``_lowerCamelCase``) — confirm the intended bindings.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 57
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): every assignment below rebinds the same obfuscated name
# ``_UpperCAmelCase``; in the original these were distinct module constants
# (logger, vocab-file names, pretrained maps, sizes, the SentencePiece
# word-boundary marker).
_UpperCAmelCase = logging.get_logger(__name__)
# Expected filename of the serialized SentencePiece model in a checkpoint dir.
_UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
# Hosted locations of the pretrained vocabulary files.
_UpperCAmelCase = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
# Maximum input lengths for positional embeddings, per checkpoint.
_UpperCAmelCase = {
    "camembert-base": 512,
}
# SentencePiece's word-boundary marker character.
_UpperCAmelCase = "▁"
class UpperCAmelCase ( lowerCAmelCase__ ):
    """CamemBERT tokenizer backed by a SentencePiece BPE model, with
    fairseq-style special-token ids placed before the SentencePiece vocab.

    NOTE(review): obfuscation damage in this block — all class attributes
    share the name ``lowerCamelCase_`` and every regular method shares the
    name ``lowerCAmelCase_`` (later defs shadow earlier ones), and several
    signatures repeat the parameter name ``lowercase`` (a SyntaxError) while
    the bodies read ``__a``/``vocab_file``/``sp_model_kwargs``/etc.  The
    original method and parameter names must be restored before this class is
    usable.
    """
    lowerCamelCase_ = VOCAB_FILES_NAMES
    lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Names of the tensors the model consumes.
    lowerCamelCase_ = ["""input_ids""", """attention_mask"""]

    def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=["<s>NOTUSED", "</s>NOTUSED"] , lowercase = None , **lowercase , ):
        """Load the SentencePiece model and register the fairseq id offsets."""
        A_ : Optional[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        A_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
        A_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__a ) )
        A_ : Any = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        A_ : List[str] = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        A_ : Optional[Any] = len(self.fairseq_tokens_to_ids )
        A_ : Optional[Any] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        A_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
        """Add special tokens: ``<cls> A <sep>`` or ``<cls> A <sep><sep> B <sep>``."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A_ : Optional[Any] = [self.cls_token_id]
        A_ : Optional[int] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = False ):
        """Return a 1/0 mask marking special-token positions in the sequence(s)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        if token_ids_a is None:
            return [1] + ([0] * len(__a )) + [1]
        return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]

    def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
        """Return an all-zero token-type id list for the given sequence(s)."""
        A_ : Any = [self.sep_token_id]
        A_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def lowerCAmelCase_ ( self ):
        """Vocabulary size: fairseq specials plus the SentencePiece pieces."""
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def lowerCAmelCase_ ( self ):
        """Return the full token -> id mapping, including added tokens."""
        A_ : int = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowerCAmelCase_ ( self , lowercase ):
        """Tokenize text into SentencePiece string pieces."""
        return self.sp_model.encode(__a , out_type=__a )

    def lowerCAmelCase_ ( self , lowercase ):
        """Map a token string to an id: fairseq specials first, then SentencePiece."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(__a ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(__a )

    def lowerCAmelCase_ ( self , lowercase ):
        """Map an id back to its token string, undoing the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def lowerCAmelCase_ ( self , lowercase ):
        """Join token pieces into a single string, decoding runs of ordinary
        pieces with SentencePiece and passing special tokens through."""
        A_ : int = []
        A_ : Any = ''
        A_ : Optional[Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__a ) + token
                A_ : str = True
                A_ : Optional[int] = []
            else:
                current_sub_tokens.append(__a )
                A_ : Tuple = False
        out_string += self.sp_model.decode(__a )
        return out_string.strip()

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        A_ : Dict = self.__dict__.copy()
        A_ : List[Any] = None
        return state

    def __setstate__( self , lowercase ):
        """Restore state and reload the SentencePiece model from disk."""
        A_ : Dict = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            A_ : Dict = {}
        A_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
        """Copy (or re-serialize) the SentencePiece model into ``save_directory``
        and return the written path as a 1-tuple."""
        if not os.path.isdir(__a ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A_ : Union[str, Any] = os.path.join(
            __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __a )
        elif not os.path.isfile(self.vocab_file ):
            with open(__a , 'wb' ) as fi:
                A_ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(__a )
        return (out_vocab_file,)
| 140
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for the ChineseCLIP processor (tokenizer + image-processor pair).

    NOTE(review): obfuscation damage — every test method is named
    ``snake_case`` (later defs shadow earlier ones, so unittest would only see
    the last one), and bodies assign results to the throwaway name
    ``__lowerCAmelCase`` while later lines read the original local/attribute
    names (``self.tmpdirname``, ``vocab_tokens``, ``processor_slow``,
    ``encoded_tok``, ...), which are therefore unbound.  The original method
    and local names must be restored before these tests can run.
    """

    def snake_case ( self ):
        # setUp: build a tiny BERT vocab and an image-processor config file
        # inside a temporary directory.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )

    def snake_case ( self , **__a ):
        # Slow tokenizer loaded from the fixture directory.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        # Fast (Rust) tokenizer loaded from the fixture directory.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        # Image processor loaded from the fixture directory.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self ):
        # tearDown: remove the fixture directory.
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self ):
        # One random 3x30x400 uint8 image wrapped as a PIL Image.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self ):
        # Round-trip save/load with both slow and fast tokenizers and compare.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )

    def snake_case ( self ):
        # Reload with extra kwargs (new special tokens / do_normalize) applied.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )

    def snake_case ( self ):
        # Processor(images=...) must match the bare image processor's output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def snake_case ( self ):
        # Processor(text=...) must match the bare tokenizer's output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self ):
        # Text + image call yields all four input keys; no input raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()

    def snake_case ( self ):
        # batch_decode is forwarded straight to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )

    def snake_case ( self ):
        # The processor's output keys equal its declared model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 0
|
def UpperCAmelCase ( lowercase ):
    """Compute the KMP prefix function of ``lowercase``.

    prefix_result[i] is the length of the longest proper prefix of
    lowercase[:i+1] that is also a suffix of it.
    """
    # NOTE(review): the original body read the undefined names
    # ``_UpperCamelCase``/``prefix_result``/``j`` (all locals had been
    # obfuscated away); restored to use the parameter and real locals.
    prefix_result = [0] * len(lowercase )
    for i in range(1 , len(lowercase ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and lowercase[i] != lowercase[j]:
            j = prefix_result[j - 1]
        if lowercase[i] == lowercase[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def UpperCAmelCase ( lowercase ):
    """Return the maximum value of the KMP prefix function of ``lowercase``
    (0 for an empty string)."""
    # NOTE(review): the original called an undefined ``prefix_function``, and
    # the helper that computes it shares this function's obfuscated name in
    # this file, so the computation is inlined here.  ``default=0`` also fixes
    # the crash on empty input.
    prefix_result = [0] * len(lowercase )
    for i in range(1 , len(lowercase ) ):
        j = prefix_result[i - 1]
        while j > 0 and lowercase[i] != lowercase[j]:
            j = prefix_result[j - 1]
        if lowercase[i] == lowercase[j]:
            j += 1
        prefix_result[i] = j
    return max(prefix_result , default=0 )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest

    doctest.testmod()
| 210
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( _UpperCamelCase = 4 ):
    """Build an n x n matrix filled row-major with 1..n*n.

    ``abs()`` normalizes negative sizes; 0 falls back to the default size 4.
    """
    # NOTE(review): the original comprehension read an undefined ``row_size``
    # and ranged over the raw argument; restored the normalized size.
    row_size = abs(_UpperCamelCase ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate the matrix 90 degrees counterclockwise (new list of lists)."""
    # NOTE(review): the original called ``reverse_row``/``transpose``, which
    # are not bound under those names in this file; the two steps
    # (transpose, then reverse the row order) are inlined.
    return [list(row ) for row in zip(*_UpperCamelCase )][::-1]
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate the matrix 180 degrees (new list of lists)."""
    # NOTE(review): the original called ``reverse_row``/``reverse_column``,
    # which are not bound under those names in this file; the two steps
    # (reverse every row, then the row order) are inlined.
    return [row[::-1] for row in _UpperCamelCase][::-1]
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate the matrix 270 degrees counterclockwise (i.e. 90 clockwise)."""
    # NOTE(review): the original called ``reverse_column``/``transpose``,
    # which are not bound under those names in this file; the two steps
    # (transpose, then reverse each row) are inlined.
    return [list(row )[::-1] for row in zip(*_UpperCamelCase )]
def _lowerCamelCase ( _UpperCamelCase ):
    """Return the transpose of the matrix as a new list of lists."""
    # Defect fixed: the original built ``list(matrix)`` (the whole argument)
    # for every column instead of ``list(x)`` of the column itself.
    return [list(col ) for col in zip(*_UpperCamelCase )]
def _lowerCamelCase ( _UpperCamelCase ):
    """Return the matrix with its row order reversed (shallow copy)."""
    # Defect fixed: the body read an undefined ``matrix`` instead of the
    # parameter.
    return _UpperCamelCase[::-1]
def _lowerCamelCase ( _UpperCamelCase ):
    """Return the matrix with each row reversed (columns mirrored)."""
    # Defect fixed: the comprehension iterated an undefined ``matrix`` instead
    # of the parameter.
    return [row[::-1] for row in _UpperCamelCase]
def _lowerCamelCase ( _UpperCamelCase ):
    """Print the matrix, one row per line with values space-separated."""
    # Defect fixed: the original iterated an undefined ``matrix`` and unpacked
    # the whole argument for every row instead of the row itself.
    for row in _UpperCamelCase:
        print(*row )
if __name__ == "__main__":
    # NOTE(review): ``make_matrix``/``print_matrix``/``rotate_aa``/``rotate_aaa``
    # are not bound under these names in this file (the helpers above are all
    # obfuscated to ``_lowerCamelCase``), ``matrix`` is never bound (each
    # result is assigned to ``A``), and ``rotate_aaa`` is used for both the
    # 180° and 270° demos — confirm the intended bindings before running.
    A : Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    A : List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    A : str = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 57
| 0
|
from copy import deepcopy
class a :
    """Fenwick tree (binary indexed tree) over a 0-indexed array.

    ``tree[0]`` stores the raw first element; entries ``i > 0`` follow the
    classic BIT layout, giving O(log n) prefix sums and point updates.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and named every method ``__snake_case`` while the bodies
    call ``self.init``/``next_``/``prev``/``add``/``get``/``prefix``/``query``;
    the names the bodies themselves reference are restored here.
    """

    def __init__( self , arr = None , size = None ):
        """Build from an existing array, or as an all-zero tree of ``size``."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError("Either arr or size must be specified" )

    def init( self , arr ):
        """(Re)initialise the tree from ``arr`` in O(n)."""
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            # Fold each node's sum into its BIT parent.
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array( self ):
        """Recover the original array in O(n) by undoing init()'s folding."""
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_( index ):
        # Next node whose range covers ``index``: add the lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def prev( index ):
        # Previous disjoint range while accumulating a prefix: clear the
        # lowest set bit.
        return index - (index & (-index))

    def add( self , index , value ):
        """Add ``value`` to the element at ``index`` (O(log n))."""
        if index == 0:
            # Index 0 is stored raw, outside the BIT chains.
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )

    def update( self , index , value ):
        """Set the element at ``index`` to ``value`` (O(log n))."""
        self.add(index , value - self.get(index ) )

    def prefix( self , right ):
        """Sum of the elements in [0, right) (O(log n))."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result

    def query( self , left , right ):
        """Sum of the elements in [left, right)."""
        return self.prefix(right ) - self.prefix(left )

    def get( self , index ):
        """Current value of the element at ``index``."""
        return self.query(index , index + 1 )

    def rank_query( self , value ):
        """Descend the implicit BIT structure to locate ``value`` among the
        prefix sums; returns -1 when ``value`` is below the first element."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest

    doctest.testmod()
| 123
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for the text2text-generation pipeline.

    NOTE(review): obfuscation damage — the first method declares the
    parameter ``__a`` three times (a SyntaxError), several methods share the
    name ``snake_case`` (later defs shadow earlier ones), and bodies assign
    to the throwaway name ``__lowerCAmelCase`` while later lines read the
    original locals (``generator``, ``outputs``, ``num_return_sequences``,
    ...), which are therefore unbound.  The original names must be restored
    before these tests can run.
    """
    __UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def snake_case ( self , __a , __a , __a ):
        # Build a pipeline instance plus sample inputs for the common runner.
        __lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
        return generator, ["Something to write", "Something else"]

    def snake_case ( self , __a , __a ):
        # Shared assertions: output shape/typing for single and batched calls.
        __lowerCAmelCase = generator("Something there" )
        self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        __lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        __lowerCAmelCase = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        with self.assertRaises(__a ):
            generator(4 )

    @require_torch
    def snake_case ( self ):
        # PyTorch smoke test against a tiny random T5 checkpoint.
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
        __lowerCAmelCase = 3
        __lowerCAmelCase = generator(
            "Something there" , num_return_sequences=__a , num_beams=__a , )
        __lowerCAmelCase = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(__a , __a )
        __lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
        self.assertEqual(
            __a , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        __lowerCAmelCase = generator.model.config.eos_token_id
        __lowerCAmelCase = "<pad>"
        __lowerCAmelCase = generator(
            ["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
        self.assertEqual(
            __a , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )

    @require_tf
    def snake_case ( self ):
        # TensorFlow smoke test against the same tiny checkpoint.
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case :
    """A binary-tree node holding a payload and optional child links.

    NOTE(review): the obfuscated original declared three unannotated
    placeholder attributes, so the dataclass generated no fields and
    constructing a node with arguments would raise; proper annotated fields
    are restored here.
    """
    data: Any
    left: "snake_case | None" = None
    right: "snake_case | None" = None
def __snake_case ( ):
    """Build the fixed sample tree: 1 with children (2, 3), 2 with (4, 5)."""
    # NOTE(review): the node class is (obfuscatedly) named ``snake_case`` in
    # this file; the original body constructed an undefined ``Node``, never
    # linked the children, and returned an unbound ``tree``.
    tree = snake_case(1 )
    tree.left = snake_case(2 )
    tree.right = snake_case(3 )
    tree.left.left = snake_case(4 )
    tree.left.right = snake_case(5 )
    return tree
def __snake_case ( UpperCAmelCase_ : Dict ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __snake_case ( UpperCAmelCase_ : List[Any] ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __snake_case ( UpperCAmelCase_ : Dict ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __snake_case ( UpperCAmelCase_ : Any ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __snake_case ( UpperCAmelCase_ : Any ):
lowerCamelCase_ = []
if root is None:
return output
lowerCamelCase_ = deque([root] )
while process_queue:
lowerCamelCase_ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] ):
lowerCamelCase_ = []
def populate_output(UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
lowerCamelCase_ = []
def populate_output(UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def __snake_case ( UpperCAmelCase_ : Any ):
    """Zig-zag level-order traversal: levels alternate left-to-right and
    right-to-left, returned as a list of per-level lists."""
    # NOTE(review): ``height`` and the two ``get_nodes_from_*`` helpers are
    # not bound under these names in this file (the defs above are all
    # obfuscated to ``__snake_case``) — confirm the intended bindings.  The
    # unbound locals ``output``/``flag``/``height_tree`` are restored here.
    if UpperCAmelCase_ is None:
        return []
    output = []
    flag = 0
    height_tree = height(UpperCAmelCase_ )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(UpperCAmelCase_ , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(UpperCAmelCase_ , h ) )
            flag = 0
    return output
def __snake_case ( ):  # Main function for testing.
    # NOTE(review): every helper referenced below (``make_tree``, ``inorder``,
    # ``preorder``, ``postorder``, ``height``, ``level_order``,
    # ``get_nodes_from_left_to_right``, ``zigzag``) is obfuscated to
    # ``__snake_case`` above, so none of these names is bound, and the tree
    # itself is assigned to the throwaway ``lowerCamelCase_`` while the
    # f-strings read ``_UpperCamelCase`` — confirm bindings before running.
    lowerCamelCase_ = make_tree()
    print(F'''In-order Traversal: {inorder(_UpperCamelCase )}''' )
    print(F'''Pre-order Traversal: {preorder(_UpperCamelCase )}''' )
    print(F'''Post-order Traversal: {postorder(_UpperCamelCase )}''' , "\n" )
    print(F'''Height of Tree: {height(_UpperCamelCase )}''' , "\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(_UpperCamelCase ) , "\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 , height(_UpperCamelCase ) + 1 ):
        print(F'''Level {level}:''' , get_nodes_from_left_to_right(_UpperCamelCase , level=_UpperCamelCase ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(_UpperCamelCase ) )


if __name__ == "__main__":
    # Run the doctests, then the demo traversals.
    import doctest

    doctest.testmod()
    main()
| 55
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase ( pl.LightningModule ):
    """LightningModule container pairing a Longformer encoder with a 2-logit
    question-answering head; used only to load a fine-tuned checkpoint for
    weight conversion."""

    def __init__( self , __a ):
        super().__init__()
        # Defect fixed: the original assigned every attribute to a throwaway
        # local, leaving ``self.model``/``self.num_labels``/``self.qa_outputs``
        # unset and reading an undefined ``model``.
        self.model = __a
        # Two logits per token: answer-span start and end.
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def snake_case ( self ):
        # Training/forward logic is irrelevant for weight conversion.
        pass
def _lowerCamelCase ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a
    ``LongformerForQuestionAnswering`` save directory.

    :param longformer_model: model identifier of the base Longformer
    :param longformer_question_answering_ckpt_path: path to the Lightning ckpt
    :param pytorch_dump_folder_path: output directory for ``save_pretrained``
    """
    # NOTE(review): the original declared the same parameter name three times
    # (a SyntaxError) and read unbound locals; names restored from the body.
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    # NOTE(review): ``convert_longformer_qa_checkpoint_to_pytorch`` is not
    # bound under that name in this file (the function above is obfuscated to
    # ``_lowerCamelCase``) — confirm the intended binding.
    A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    A : Optional[int] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments rebind the same obfuscated name; in the
# original the second was the pretrained-config archive map.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Map from model identifier to its hosted config.json.
_SCREAMING_SNAKE_CASE = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
    """Configuration holding the MobileNetV2 architecture hyper-parameters."""

    __lowerCAmelCase = """mobilenet_v2"""

    def __init__(
        self ,
        num_channels=3 ,
        image_size=224 ,
        depth_multiplier=1.0 ,
        depth_divisible_by=8 ,
        min_depth=8 ,
        expand_ratio=6 ,
        output_stride=32 ,
        first_layer_is_expansion=True ,
        finegrained_output=True ,
        hidden_act="relu6" ,
        tf_padding=True ,
        classifier_dropout_prob=0.8 ,
        initializer_range=0.02 ,
        layer_norm_eps=0.001 ,
        semantic_loss_ignore_index=255 ,
        **kwargs ,
    ):
        """Store the hyper-parameters; rejects non-positive depth multipliers."""
        # NOTE(review): the obfuscated original declared every parameter with
        # the same name (a SyntaxError); the names are restored from the
        # attribute assignments in the body.
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
    """ONNX export configuration for MobileNetV2."""

    __lowerCAmelCase = version.parse("""1.11""" )

    @property
    def inputs( self ):
        """Single image input, dynamic on the batch axis."""
        # NOTE(review): the three properties were all obfuscated to the same
        # name (shadowing each other); restored per the OnnxConfig contract.
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )

    @property
    def outputs( self ):
        """Task-dependent output layout, dynamic on the batch axis."""
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )

    @property
    def atol_for_validation( self ):
        """Absolute tolerance when validating exported vs. eager outputs."""
        return 1E-4
| 343
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
    """Sort the list in place with odd-even transposition sort and return it.

    Repeatedly sweeps the even-indexed pairs, then the odd-indexed pairs,
    until a full double sweep makes no swap.
    """
    sorted_yet = False
    while not sorted_yet:
        sorted_yet = True
        # One even-phase sweep followed by one odd-phase sweep.
        for phase in (0, 1):
            for idx in range(phase , len(_UpperCamelCase ) - 1 , 2 ):
                left, right = _UpperCamelCase[idx], _UpperCamelCase[idx + 1]
                if left > right:
                    # Out of order: swap and force another full pass.
                    _UpperCamelCase[idx], _UpperCamelCase[idx + 1] = right, left
                    sorted_yet = False
    return _UpperCamelCase
if __name__ == "__main__":
    print("Enter list to be sorted")
    # Read one line of whitespace-separated integers from stdin.
    # NOTE(review): ``odd_even_sort`` is not bound under that name in this
    # file (the function above is obfuscated to ``_lowerCamelCase``), and
    # ``input_list``/``sorted_list`` are never bound (both assignments target
    # ``A``) — confirm the intended bindings before running.
    A : Union[str, Any] = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    A : str = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class A ( lowerCAmelCase__ ):
def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]=None , lowercase_ : Tuple=True , lowercase_ : str=None , **lowercase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : int =parent
_lowerCamelCase : Optional[Any] =config_class
_lowerCamelCase : Optional[Any] =has_text_modality
_lowerCamelCase : Optional[int] =kwargs
_lowerCamelCase : Optional[int] =common_properties
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] =self.config_class(**self.inputs_dict )
_lowerCamelCase : Optional[int] =(
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__a , __a ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(__a ):
try:
setattr(__a , __a , __a )
self.parent.assertEqual(
getattr(__a , __a ) , __a , msg=F'''`{name} value {idx} expected, but was {getattr(__a , __a )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__a ):
try:
_lowerCamelCase : str =self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__a , __a ) , __a , msg=F'''`{name} value {idx} expected, but was {getattr(__a , __a )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[str] =self.config_class(**self.inputs_dict )
_lowerCamelCase : Any =json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __a )
def lowerCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_lowerCamelCase : int =self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : str =os.path.join(__a , 'config.json' )
config_first.to_json_file(__a )
_lowerCamelCase : str =self.config_class.from_json_file(__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__a )
_lowerCamelCase : Optional[Any] =self.config_class.from_pretrained(__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] =self.config_class(**self.inputs_dict )
_lowerCamelCase : Tuple ='test'
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[Any] =os.path.join(__a , __a )
config_first.save_pretrained(__a )
_lowerCamelCase : Union[str, Any] =self.config_class.from_pretrained(__a , subfolder=__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Dict =self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_lowerCamelCase : Union[str, Any] =3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
if self.config_class.is_composition:
return
_lowerCamelCase : List[str] =self.config_class()
self.parent.assertIsNotNone(__a )
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =copy.deepcopy(__a )
_lowerCamelCase : List[str] =self.config_class(**__a )
_lowerCamelCase : int =[]
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(__a , __a ) != value:
wrong_values.append((key, getattr(__a , __a ), value) )
if len(__a ) > 0:
_lowerCamelCase : List[Any] ='\n'.join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 199
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase(lowerCAmelCase__):
    """Processor bundling a CLIP image processor and an XLM-Roberta tokenizer
    into a single callable object (AltCLIP-style).

    Reconstructed from the garbled original, which had duplicate ``__a``
    parameter names (a SyntaxError), dropped the ``pixel_values`` merge into a
    dead local, and lost the ``ProcessorMixin`` class attributes
    (``attributes``, ``image_processor_class``, ``tokenizer_class``).
    """

    # Names ProcessorMixin uses to locate and (de)serialize the sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` when no image processor is given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one must be given.

        Returns the tokenizer encoding (with `pixel_values` merged in when images
        are also provided) or a `BatchEncoding` of image features alone.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge image features into the text encoding so one object holds both.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded verbatim to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded verbatim to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving, de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
| 0
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used by the tests below; the test bodies read this
# as FRAMEWORK (the garbled original assigned it to `UpperCamelCase__`,
# leaving FRAMEWORK undefined).
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class lowerCamelCase_ ( lowerCAmelCase__ , unittest.TestCase ):
    """Test suite for the Perceiver byte-level tokenizer.

    NOTE(review): identifier obfuscation has garbled this class heavily — every
    method is named ``lowercase_`` (later defs shadow earlier ones), results are
    assigned to throwaway ``UpperCAmelCase__`` locals while later lines read the
    original variable names (e.g. ``tokenizer``, ``toks``), and ``__a`` appears
    where real arguments belong. The code is kept byte-identical here; only
    comments/docstrings were added. Restore the original names before running.
    """

    # Tokenizer class under test and the rust-tokenizer flag (garbled attribute names).
    lowerCAmelCase__ = PerceiverTokenizer
    lowerCAmelCase__ = False

    def lowercase_ ( self : Optional[int] ):
        """Save a fresh PerceiverTokenizer into the test's tmp dir (setUp)."""
        super().setUp()
        # NOTE(review): the tokenizer instance is assigned to a throwaway local,
        # yet the next line reads ``tokenizer`` — garbled; presumably the same object.
        UpperCAmelCase__ : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase_ ( self : int ):
        """Reference tokenizer loaded from the hub (read as ``perceiver_tokenizer`` below)."""
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )

    def lowercase_ ( self : List[Any] , **_A : Optional[Any] ):
        """Build a tokenizer from the tmp dir, forwarding extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )

    def lowercase_ ( self : Tuple , _A : Dict , _A : str=False , _A : List[str]=20 , _A : Optional[Any]=5 ):
        """Build a clean (text, ids) pair that round-trips through the tokenizer.

        NOTE(review): the duplicated ``_A`` parameter names are a SyntaxError —
        the parameters were presumably (tokenizer, with_prefix_space, max_length,
        min_length) judging by the body.
        """
        # Collect (id, decoded-token) pairs, skipping ids that don't decode.
        UpperCAmelCase__ : Optional[int] = []
        for i in range(len(__a ) ):
            try:
                UpperCAmelCase__ : Optional[int] = tokenizer.decode([i] , clean_up_tokenization_spaces=__a )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # Keep only ASCII-alphabetic tokens that re-encode to exactly their own id.
        UpperCAmelCase__ : Optional[Any] = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __a ) )
        UpperCAmelCase__ : Optional[int] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__a ) , __a ) )
        if max_length is not None and len(__a ) > max_length:
            UpperCAmelCase__ : str = toks[:max_length]
        if min_length is not None and len(__a ) < min_length and len(__a ) > 0:
            while len(__a ) < min_length:
                UpperCAmelCase__ : Union[str, Any] = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase__ : int = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase__ : Optional[Any] = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
        if " " not in output_txt and len(__a ) > 1:
            # Force a space between the first token and the rest for determinism.
            UpperCAmelCase__ : str = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__a )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__a )
            )
        if with_prefix_space:
            UpperCAmelCase__ : int = ''' ''' + output_txt
        UpperCAmelCase__ : Optional[Any] = tokenizer.encode(__a , add_special_tokens=__a )
        return output_txt, output_ids

    def lowercase_ ( self : List[str] ):
        """Multibyte characters must encode to the expected byte ids and decode back."""
        UpperCAmelCase__ : Optional[Any] = self.perceiver_tokenizer
        UpperCAmelCase__ : List[str] = '''Unicode €.'''
        UpperCAmelCase__ : List[Any] = tokenizer(__a )
        UpperCAmelCase__ : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['''input_ids'''] , __a )
        # decoding
        UpperCAmelCase__ : List[Any] = tokenizer.decode(__a )
        self.assertEqual(__a , '''[CLS]Unicode €.[SEP]''' )
        UpperCAmelCase__ : Tuple = tokenizer('''e è é ê ë''' )
        UpperCAmelCase__ : Optional[Any] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['''input_ids'''] , __a )
        # decoding
        UpperCAmelCase__ : List[Any] = tokenizer.decode(__a )
        self.assertEqual(__a , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )

    def lowercase_ ( self : List[Any] ):
        """Batched padding must produce the expected ids and (2, 38) shapes."""
        UpperCAmelCase__ : Optional[int] = self.perceiver_tokenizer
        UpperCAmelCase__ : List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        UpperCAmelCase__ : Optional[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        UpperCAmelCase__ : List[str] = tokenizer(__a , padding=__a , return_tensors=__a )
        self.assertIsInstance(__a , __a )
        if FRAMEWORK != "jax":
            UpperCAmelCase__ : Optional[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase__ : Optional[int] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__a , __a )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def lowercase_ ( self : Dict ):
        """Plain (non-target) tokenization must not emit decoder fields."""
        UpperCAmelCase__ : List[Any] = self.perceiver_tokenizer
        UpperCAmelCase__ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase__ : Any = tokenizer(__a , padding=__a , return_tensors=__a )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , __a )
        self.assertIn('''attention_mask''' , __a )
        self.assertNotIn('''decoder_input_ids''' , __a )
        self.assertNotIn('''decoder_attention_mask''' , __a )

    def lowercase_ ( self : List[str] ):
        """Target texts padded to max_length must come out with width 32."""
        UpperCAmelCase__ : Optional[int] = self.perceiver_tokenizer
        UpperCAmelCase__ : List[str] = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        UpperCAmelCase__ : int = tokenizer(
            text_target=__a , max_length=32 , padding='''max_length''' , truncation=__a , return_tensors=__a )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    def lowercase_ ( self : List[str] ):
        """Save/reload round trip must preserve encodings, added tokens, and model_max_length."""
        UpperCAmelCase__ : str = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        UpperCAmelCase__ : str = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
                UpperCAmelCase__ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
                UpperCAmelCase__ : Any = tokenizer.encode(__a , add_special_tokens=__a )
                tokenizer.save_pretrained(__a )
                UpperCAmelCase__ : Any = tokenizer.__class__.from_pretrained(__a )
                UpperCAmelCase__ : Dict = after_tokenizer.encode(__a , add_special_tokens=__a )
                self.assertListEqual(__a , __a )
                shutil.rmtree(__a )
        UpperCAmelCase__ : int = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase__ : Any = tempfile.mkdtemp()
                UpperCAmelCase__ : Optional[int] = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                UpperCAmelCase__ : Union[str, Any] = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                UpperCAmelCase__ : int = tokenizer.encode(__a , add_special_tokens=__a )
                tokenizer.save_pretrained(__a )
                UpperCAmelCase__ : str = tokenizer.__class__.from_pretrained(__a )
                UpperCAmelCase__ : Optional[Any] = after_tokenizer.encode(__a , add_special_tokens=__a )
                self.assertListEqual(__a , __a )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                UpperCAmelCase__ : List[str] = tokenizer.__class__.from_pretrained(__a , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(__a )

    def lowercase_ ( self : Optional[int] ):
        """additional_special_tokens from the saved JSON configs must be honored
        and overridable at from_pretrained time."""
        UpperCAmelCase__ : Tuple = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__a )
                with open(os.path.join(__a , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    UpperCAmelCase__ : List[Any] = json.load(__a )
                with open(os.path.join(__a , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    UpperCAmelCase__ : List[str] = json.load(__a )
                # Perceiver reserves 125 <extra_id_*> sentinel tokens.
                UpperCAmelCase__ : str = [f"""<extra_id_{i}>""" for i in range(125 )]
                UpperCAmelCase__ : Tuple = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                UpperCAmelCase__ : Dict = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(__a , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__a , __a )
                with open(os.path.join(__a , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__a , __a )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase__ : Optional[Any] = tokenizer_class.from_pretrained(
                    __a , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase__ : Optional[int] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__a )]
                UpperCAmelCase__ : Union[str, Any] = tokenizer_class.from_pretrained(
                    __a , additional_special_tokens=__a , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )

    def lowercase_ ( self : Union[str, Any] ):
        """An id outside the valid byte range must decode to the replacement char."""
        UpperCAmelCase__ : List[Any] = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '''�''' )

    def lowercase_ ( self : Any ):
        """Intentionally skipped common-test override (not applicable to byte tokenizers)."""
        pass

    def lowercase_ ( self : Dict ):
        """Intentionally skipped common-test override."""
        pass

    def lowercase_ ( self : Optional[int] ):
        """Intentionally skipped common-test override."""
        pass

    def lowercase_ ( self : Union[str, Any] ):
        """Intentionally skipped common-test override."""
        pass

    def lowercase_ ( self : Optional[Any] ):
        """convert_tokens_to_string must join special and byte tokens into a str."""
        UpperCAmelCase__ : Any = self.get_tokenizers(fast=__a , do_lower_case=__a )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                UpperCAmelCase__ : Dict = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_string(__a )
                self.assertIsInstance(__a , __a )
| 181
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """Empty placeholder test case; exists only to exercise the
    `require_onnxruntime` decorator (skips when onnxruntime is unavailable)."""

    pass
| 57
| 0
|
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function(*args)` from a notebook, dispatching to TPU, multi-GPU,
    single-GPU/MPS, or CPU depending on the environment.

    Reconstructed from the garbled original: the parameter list repeated one
    name five times (a SyntaxError) and several assignments targeted throwaway
    locals (`in_colab`/`in_kaggle`, the MPS env var). Parameter names follow the
    usage in the body.
    """
    # Are we running in Colab or Kaggle?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        # NOTE(review): `args` is a tuple here, so `args.mixed_precision` would
        # raise AttributeError — kept byte-for-byte from the source string;
        # confirm against upstream before changing.
        raise ValueError(
            f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`.")
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"""Launching a training on {num_processes} TPU cores.""")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.")
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`.")
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function.")
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"""Launching training on {num_processes} GPUs.""")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic.") from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # Enable CPU fallback for ops MPS does not implement yet.
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch `function(*args)` over `num_processes` CPU processes for debugging
    distributed code on a single machine.

    Reconstructed from the garbled original (duplicate parameter names were a
    SyntaxError, and this second `_lowercase` shadowed the first).
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 132
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap that also tracks each vertex's heap index, as
    required by Prim's algorithm below.

    Renamed from the garbled original, where the class was `_UpperCamelCase`
    (the driver constructs `Heap()`), all six methods shared one name, and
    every method signature repeated `__a` (a SyntaxError). Method names follow
    the call sites in `prisms_algorithm`.
    """

    def __init__(self):
        # node_position[vertex] -> current index of that vertex inside the heap.
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at index `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            # Keep the vertex -> heap-index map in sync with the swap.
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift the (decreased) key `val` up from `index` to its correct place."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: place the value at index 0.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place from an arbitrary array."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key; the root key becomes +inf."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree of `adjacency_list`
    (Prim's algorithm), as (parent, vertex) pairs.

    Renamed from the garbled `_lowerCamelCase`: the `__main__` block calls
    `prisms_algorithm`, and every local had collapsed to `__lowerCAmelCase`.
    Variable names follow the surviving comments and usage in the body.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Restored variable names: the garbled source bound all three to `A`,
    # leaving `edges_number`, `adjacency_list` and `edge` undefined below.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        # Each edge line: "u v weight"; the graph is undirected.
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the `transformers-cli` command-line tool.

    Renamed from the garbled `lowerCAmelCase__`: the `__main__` guard below
    calls `main()`, and the subparser/args/service locals had all collapsed to
    throwaway names, leaving the register/dispatch calls pointing at an
    undefined `_UpperCamelCase`.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 294
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old key prefix, new key prefix) pairs applied to every checkpoint key.
# Restored names: the garbled source bound all three module constants to `A`,
# leaving `rename_keys_prefix` and `ACCEPTABLE_CHECKPOINTS` (used by the
# functions below) undefined.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Checkpoint file names this converter knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a raw checkpoint onto CPU and return its state dict.

    Renamed from the garbled `_lowerCamelCase`: the converter below calls
    `load_state_dict(...)`.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an original VisualBERT state dict `d` into transformers-format
    keys, dropping detector weights and adding the position-ids buffer.

    Reconstructed from the garbled original: the renamed key, the new-dict
    insertion and the decoder-bias fix-up were all assigned to throwaway
    locals, so the function returned an almost-empty dict.
    """
    new_d = OrderedDict()
    # Transformers models carry position_ids as a buffer; synthesize it here.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint at `checkpoint_path` into the
    transformers format and save it to `pytorch_dump_folder_path`.

    Renamed from the garbled `_lowerCamelCase` (the `__main__` block calls
    `convert_visual_bert_checkpoint`); the config-params / model-type / model
    locals were all throwaway names in the source.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config — the file name encodes both the model type and the visual dim.
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Restored `parser`/`args` names: the garbled source bound both to `A`,
    # so `parser.add_argument` and `args.*` referenced undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 0
|
"""simple docstring"""
import numpy
# List of input, output pairs
# List of input, output pairs
# Restored constant names: the garbled source bound all five to `lowercase__`
# (each shadowing the last), while the functions below read `train_data`,
# `test_data`, `parameter_vector`, `m` and `LEARNING_RATE`.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]  # [bias, w1, w2, w3], updated in place by gradient descent
m = len(train_data)  # number of training examples
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Signed error (hypothesis minus actual output) for one example.

    Reconstructed: the garbled signature repeated one parameter name
    (a SyntaxError) and the def name clashed with every sibling.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: bias (parameter_vector[0]) plus the dot product of
    the inputs with the remaining parameters."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output value of the given example from the train or test set;
    None for an unknown data_set name."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example from the train or test set;
    None for an unknown data_set name."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of error terms over the first `end` training examples.

    index == -1 corresponds to the bias parameter (plain error sum); otherwise
    each error is weighted by that example's `index`-th input feature.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Partial derivative of the cost w.r.t. the parameter at `index`
    (-1 for the bias), averaged over all m training examples."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Iterate batch gradient descent on the global `parameter_vector` until
    successive parameter vectors agree within the absolute tolerance.

    Reconstructed locals: the garbled source lost `absolute_error_limit`,
    `relative_error_limit`, `j`, `temp_parameter_vector` and `cost_derivative`
    to throwaway names, so `numpy.allclose` compared undefined values.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 maps index 0 to the bias derivative (-1).
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. hypothesis output for every test example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    # Fit the linear hypothesis first, then report test-set predictions.
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
| 96
|
"""simple docstring"""
class OverFlowError(Exception):
    """Raised when a queue already holds its maximum number of items."""


class UnderFlowError(Exception):
    """Raised when dequeue is attempted on an empty queue."""
class FixedPriorityQueue:
    """Three-level priority queue; priority 0 is served first.

    Each priority level is a FIFO holding at most 100 items.
    """

    def __init__(self):
        # One FIFO list per priority level (0, 1, 2).
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Add ``data`` at ``priority`` (0, 1 or 2).

        Raises OverflowError when that level is full and ValueError for an
        invalid priority.
        """
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Remove and return the oldest item of the highest non-empty priority.

        Raises UnderFlowError when every level is empty.
        """
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Priority queue in which the smallest element is served first."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append ``data``; raises OverFlowError once 100 items are held."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the smallest item; raises UnderFlowError when empty."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """Demonstrate FixedPriorityQueue: fill it, then drain and print."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demonstrate ElementPriorityQueue: fill it, then drain and print."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    # Run both queue demos.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase ( unittest.TestCase ):
    """Tests for BlipaProcessor: save/load round-trips and joint image+text processing.

    NOTE(review): method and local names were restored from machine-mangled
    identifiers (``a__``/``__a``); all methods previously shared the name
    ``a__`` (so unittest discovered no tests) and ``np.uinta`` does not exist.
    """

    def setUp(self) -> None:
        # Persist a freshly built processor so each test can reload it.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        # assumes the processor drops token_type_ids — TODO confirm upstream
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 26
|
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Returns a space-separated string of results.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    # Original had both parameters named ``_UpperCamelCase`` — a SyntaxError —
    # and isinstance checks against that undefined name.
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 57
| 0
|
# Snippet injected at the top of every auto-generated doc notebook.
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# First cell of each generated notebook: the installation snippet above.
# (Original mangled name rebound ``_UpperCAmelCase`` three times and left
# ``INSTALL_CONTENT`` undefined at its use site.)
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Doc placeholders replaced by dummy classes when rendering notebooks.
# NOTE(review): the two names above/below are conventional — confirm against
# the upstream doc-build configuration.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 140
|
"""simple docstring"""
def solution():
    """Project Euler 40: product of digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112...

    >>> solution()
    210
    """
    constant = []
    i = 1
    # One entry per integer; 10**6 entries give comfortably more than
    # 10**6 digits once joined.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 57
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both bindings were mangled to ``__a`` (the map shadowed the
# logger). The map name below follows the HF convention — confirm upstream.
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    """Configuration class for the LUKE entity-aware transformer.

    Stores vocabulary/entity sizes and the usual BERT-style hyper-parameters.
    The base class was mangled to an undefined name; ``PretrainedConfig`` is
    the class imported at the top of this file.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        # Special-token ids go to the base class; everything else is stored.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 210
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: every test function previously shared one mangled name
# (``_lowerCamelCase``), so only the last survived import; names restored.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace filter kernel
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path="digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 57
| 0
|
def lowerCAmelCase_(num: int) -> bool:
    """Return True when ``num`` is a non-negative palindromic integer.

    The original parameter name was mangled while the body read ``num``,
    so every call raised NameError; the parameter is restored here.

    >>> lowerCAmelCase_(121)
    True
    >>> lowerCAmelCase_(-121)
    False
    """
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    # Peel off the last decimal digit each step, building the reversal.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 123
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: submodule name -> public symbols. The original bound
# this dict (and the torch-only list) to mangled throwaway names, so the
# ``_import_structure`` referenced by _LazyModule below was never defined.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only exported when torch is installed.
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy resolving names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a sklearn Bunch/dict into a ``(features, targets)`` pair."""
    # Name restored: main() below calls ``data_handling``.
    return (data["data"], data["target"])
def xgboost(features, target):
    """Fit an XGBClassifier on the given features/targets and return it."""
    # Name restored: main() below calls ``xgboost``.
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the IRIS set and plot its confusion matrix."""
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 55
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """TrainingArguments extended with seq2seq-specific knobs.

    The original dataclass declared every field under one mangled name (only
    the last survived) with undefined defaults; field names and defaults are
    restored from the metadata help strings.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 57
| 0
|
from ... import PretrainedConfig
# Checkpoint name -> config URL; NezhaConfig below reads this exact name,
# which the mangled binding (``_SCREAMING_SNAKE_CASE``) left undefined.
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    """Configuration class for the NEZHA model.

    The mangled base class was undefined; ``PretrainedConfig`` is the class
    imported at the top of this file.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids go to the base class; everything else is stored.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 343
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# Constants restored: the functions below reference REPLACE_PATTERNS and
# REPLACE_FILES, which the mangled ``A : ...`` bindings left undefined.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex, replacement template with "VERSION").
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
# pattern name -> file holding that version string.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version inside ``fname`` using the regex for ``pattern``.

    Original signature had three parameters with the same mangled name — a
    SyntaxError; names restored.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned version in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (init, setup, examples).

    Example scripts are skipped for patch releases.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with stable links in the README model list."""
    # Prompts delimiting the model list; restored — the mangled bindings left
    # ``_start_prompt``/``_end_prompt`` undefined at their use sites.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package ``__init__``."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    # The "init" regex captures the version string in group 0.
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the next release version (asking for confirmation) and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Bump to the next dev version (asking for confirmation) after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point; the original bound parse_args() to a mangled name and
    # then read the undefined ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Names restored: all four values were rebound to one mangled name
# (``lowerCamelCase``), leaving three dead and the class attributes below
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) plus ``logger`` unresolved.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class A ( lowerCAmelCase__ ):
    """Fast (Rust-backed) Blenderbot tokenizer.

    NOTE(review): identifiers in this block were machine-mangled. The base
    class ``lowerCAmelCase__`` and the ``__a`` names inside method bodies are
    undefined as written (base should presumably be PreTrainedTokenizerFast,
    each ``__a`` one of the ``lowercase_`` parameters); restoring them requires
    the upstream source — do not trust positional guesses here.
    """
    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : str =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : Union[str, Any] =["""input_ids""", """attention_mask"""]
    UpperCamelCase__ : List[str] =BlenderbotTokenizer
    def __init__( self : List[Any] , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]="replace" , lowercase_ : List[str]="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<s>" , lowercase_ : Any="<unk>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : int="<mask>" , lowercase_ : Tuple=False , lowercase_ : Optional[int]=True , **lowercase_ : str , ) -> Tuple:
        """Build the fast tokenizer, then sync add_prefix_space/trim_offsets
        into the backend pre-tokenizer and post-processor state."""
        super().__init__(
            __a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , )
        # Rebuild the pre-tokenizer when its stored add_prefix_space disagrees.
        _lowerCamelCase : Optional[int] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , __a ) != add_prefix_space:
            _lowerCamelCase : Optional[int] =getattr(__a , pre_tok_state.pop('type' ) )
            _lowerCamelCase : Union[str, Any] =add_prefix_space
            _lowerCamelCase : Tuple =pre_tok_class(**__a )
        _lowerCamelCase : str =add_prefix_space
        _lowerCamelCase : List[str] ='post_processor'
        _lowerCamelCase : Any =getattr(self.backend_tokenizer , __a , __a )
        if tokenizer_component_instance:
            _lowerCamelCase : Optional[Any] =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _lowerCamelCase : Union[str, Any] =tuple(state['sep'] )
            if "cls" in state:
                _lowerCamelCase : Any =tuple(state['cls'] )
            _lowerCamelCase : Any =False
            if state.get('add_prefix_space' , __a ) != add_prefix_space:
                _lowerCamelCase : int =add_prefix_space
                _lowerCamelCase : List[str] =True
            if state.get('trim_offsets' , __a ) != trim_offsets:
                _lowerCamelCase : List[Any] =trim_offsets
                _lowerCamelCase : Dict =True
            if changes_to_apply:
                # Rebuild the post-processor with the corrected settings.
                _lowerCamelCase : Union[str, Any] =getattr(__a , state.pop('type' ) )
                _lowerCamelCase : Optional[int] =component_class(**__a )
                setattr(self.backend_tokenizer , __a , __a )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def lowerCamelCase ( self : Tuple ) -> List[str]:
        """Return the mask token string, or None (with an error log) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def lowerCamelCase ( self : List[str] , lowercase_ : List[Any] ) -> List[Any]:
        """Set the mask token, wrapping plain strings in an AddedToken."""
        _lowerCamelCase : int =AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else value
        _lowerCamelCase : Union[str, Any] =value
    def lowerCamelCase ( self : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : int ) -> Optional[int]:
        """Batch-encode; pretokenized input requires add_prefix_space=True."""
        _lowerCamelCase : Tuple =kwargs.get('is_split_into_words' , __a )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*__a , **__a )
    def lowerCamelCase ( self : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : List[str] ) -> Any:
        """Encode one input; pretokenized input requires add_prefix_space=True."""
        _lowerCamelCase : Dict =kwargs.get('is_split_into_words' , __a )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*__a , **__a )
    def lowerCamelCase ( self : str , lowercase_ : Any , lowercase_ : Optional[int] = None ) -> int:
        """Save the backend tokenizer model files; returns the file paths."""
        _lowerCamelCase : str =self._tokenizer.model.save(__a , name=__a )
        return tuple(__a )
    def lowerCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : str = None ) -> List[str]:
        """Return a zero-filled token-type-id list covering the special tokens."""
        _lowerCamelCase : Tuple =[self.sep_token_id]
        _lowerCamelCase : Any =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowerCamelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : int = None ) -> Union[str, Any]:
        """Append the EOS token to a token-id sequence."""
        return token_ids_a + [self.eos_token_id]
    def lowerCamelCase ( self : int , lowercase_ : List[str] ) -> Any:
        """Encode a Conversation into input ids, trimming to model_max_length."""
        _lowerCamelCase : List[str] =[]
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(__a )
        _lowerCamelCase : Optional[Any] =' '.join(__a )
        _lowerCamelCase : Any =self.encode(__a )
        if len(__a ) > self.model_max_length:
            # Keep only the most recent tokens that fit the model window.
            _lowerCamelCase : Dict =input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 199
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
with open(_UpperCamelCase ) as metadata_file:
UpperCAmelCase__ : int = json.load(_UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = LukeConfig(use_entity_aware_attention=_UpperCamelCase , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
UpperCAmelCase__ : str = torch.load(_UpperCamelCase , map_location='''cpu''' )
# Load the entity vocab file
UpperCAmelCase__ : Union[str, Any] = load_entity_vocab(_UpperCamelCase )
UpperCAmelCase__ : List[Any] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCAmelCase__ : Union[str, Any] = AddedToken('''<ent>''' , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = AddedToken('''<ent2>''' , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase__ : List[Any] = LukeTokenizer.from_pretrained(_UpperCamelCase )
# Initialize the embeddings of the special tokens
UpperCAmelCase__ : Optional[int] = state_dict['''embeddings.word_embeddings.weight''']
UpperCAmelCase__ : Optional[int] = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
UpperCAmelCase__ : int = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
UpperCAmelCase__ : List[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCAmelCase__ : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
UpperCAmelCase__ : int = state_dict[prefix + matrix_name]
UpperCAmelCase__ : int = state_dict[prefix + matrix_name]
UpperCAmelCase__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCAmelCase__ : str = state_dict['''entity_embeddings.entity_embeddings.weight''']
UpperCAmelCase__ : List[str] = entity_emb[entity_vocab['''[MASK]''']]
UpperCAmelCase__ : Tuple = LukeModel(config=_UpperCamelCase ).eval()
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
if not (len(_UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(_UpperCamelCase )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
UpperCAmelCase__ : Dict = LukeTokenizer.from_pretrained(_UpperCamelCase , task='''entity_classification''' )
UpperCAmelCase__ : Union[str, Any] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
UpperCAmelCase__ : str = (39, 42)
UpperCAmelCase__ : Dict = tokenizer(_UpperCamelCase , entity_spans=[span] , add_prefix_space=_UpperCamelCase , return_tensors='''pt''' )
UpperCAmelCase__ : Dict = model(**_UpperCamelCase )
# Verify word hidden states
if model_size == "large":
UpperCAmelCase__ : Tuple = torch.Size((1, 42, 10_24) )
UpperCAmelCase__ : Tuple = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
UpperCAmelCase__ : int = torch.Size((1, 42, 7_68) )
UpperCAmelCase__ : Any = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
UpperCAmelCase__ : List[str] = torch.Size((1, 1, 10_24) )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
UpperCAmelCase__ : Any = torch.Size((1, 1, 7_68) )
UpperCAmelCase__ : List[str] = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_UpperCamelCase ) )
model.save_pretrained(_UpperCamelCase )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : List[Any] = {}
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(_UpperCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = line.rstrip().split('''\t''' )
UpperCAmelCase__ : Dict = index
return entity_vocab
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 181
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ):
'''simple docstring'''
try:
__lowerCAmelCase = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__lowerCAmelCase = 2
__lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCAmelCase = i
while n % i == 0:
__lowerCAmelCase = n // i
i += 1
return int(_UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 57
| 0
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Hard version pins: this conversion script was written against exact releases
# of gluonnlp and mxnet and refuses to run against anything else.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
# NOTE(review): both the module logger and the sample sentence are assigned to
# the same name `a`, so the second assignment clobbers the logger -- the
# original code presumably used two distinct names (e.g. `logger`, SAMPLE_TEXT).
a :Dict = logging.get_logger(__name__)
a :List[str] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
    """Convert the official Gluon Bort checkpoint into a HF Transformers BERT
    checkpoint and verify that both models produce approximately equal outputs.

    NOTE(review): both parameters are named ``__lowerCAmelCase`` (a SyntaxError)
    and the body refers to ``_UpperCamelCase``, which is never defined -- the
    original identifiers (bort_checkpoint_path, pytorch_dump_folder_path, plus
    several literal True/False arguments) appear to have been mangled and must
    be restored before this function can run.
    """
    # Hyper-parameters of the released Bort 4-layer / 8-head / 768 / 1024 model.
    SCREAMING_SNAKE_CASE__ : Optional[Any] = {
        """attention_cell""": """multi_head""",
        """num_layers""": 4,
        """units""": 1024,
        """hidden_size""": 768,
        """max_length""": 512,
        """num_heads""": 8,
        """scaled""": True,
        """dropout""": 0.1,
        """use_residual""": True,
        """embed_size""": 1024,
        """embed_dropout""": 0.1,
        """word_embed""": None,
        """layer_norm_eps""": 1E-5,
        """token_type_vocab_size""": 2,
    }
    # NOTE(review): `bort_4_8_768_1024_hparams` is undefined here -- the dict
    # above was presumably bound to that name originally.
    SCREAMING_SNAKE_CASE__ : Tuple = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = BERTEncoder(
        attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=_UpperCamelCase , output_all_encodings=_UpperCamelCase , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , _UpperCamelCase ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    SCREAMING_SNAKE_CASE__ : Any = """openwebtext_ccnews_stories_books_cased"""
    # Specify download folder to Gluonnlp's vocab
    SCREAMING_SNAKE_CASE__ : int = os.path.join(get_home_dir() , """models""" )
    SCREAMING_SNAKE_CASE__ : str = _load_vocab(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , cls=_UpperCamelCase )
    SCREAMING_SNAKE_CASE__ : Dict = nlp.model.BERTModel(
        _UpperCamelCase , len(_UpperCamelCase ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=_UpperCamelCase , use_token_type_embed=_UpperCamelCase , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=_UpperCamelCase , use_decoder=_UpperCamelCase , )
    original_bort.load_parameters(_UpperCamelCase , cast_dtype=_UpperCamelCase , ignore_extra=_UpperCamelCase )
    SCREAMING_SNAKE_CASE__ : List[Any] = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    SCREAMING_SNAKE_CASE__ : Tuple = {
        """architectures""": ["""BertForMaskedLM"""],
        """attention_probs_dropout_prob""": predefined_args["""dropout"""],
        """hidden_act""": """gelu""",
        """hidden_dropout_prob""": predefined_args["""dropout"""],
        """hidden_size""": predefined_args["""embed_size"""],
        """initializer_range""": 0.02,
        """intermediate_size""": predefined_args["""hidden_size"""],
        """layer_norm_eps""": predefined_args["""layer_norm_eps"""],
        """max_position_embeddings""": predefined_args["""max_length"""],
        """model_type""": """bort""",
        """num_attention_heads""": predefined_args["""num_heads"""],
        """num_hidden_layers""": predefined_args["""num_layers"""],
        """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
        """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
        """vocab_size""": len(_UpperCamelCase ),
    }
    SCREAMING_SNAKE_CASE__ : Dict = BertConfig.from_dict(_UpperCamelCase )
    SCREAMING_SNAKE_CASE__ : List[str] = BertForMaskedLM(_UpperCamelCase )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(__lowerCAmelCase ) -> nn.Parameter:
        # NOTE(review): refers to `mx_array`, not its (mangled) parameter name.
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    def check_and_map_params(__lowerCAmelCase , __lowerCAmelCase ):
        # NOTE(review): duplicate parameter names; body refers to
        # `hf_param`/`gluon_param`, the presumed original names.
        SCREAMING_SNAKE_CASE__ : Any = hf_param.shape
        SCREAMING_SNAKE_CASE__ : Optional[Any] = to_torch(params[gluon_param] )
        SCREAMING_SNAKE_CASE__ : Dict = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
    # Copy embedding-level parameters.
    SCREAMING_SNAKE_CASE__ : List[Any] = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
    SCREAMING_SNAKE_CASE__ : Tuple = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
    SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
    SCREAMING_SNAKE_CASE__ : List[Any] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    SCREAMING_SNAKE_CASE__ : List[str] = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    # Copy each transformer layer's parameters.
    for i in range(hf_bort_config.num_hidden_layers ):
        SCREAMING_SNAKE_CASE__ : Optional[int] = hf_bort_model.bert.encoder.layer[i]
        # self attention
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer.attention.self
        SCREAMING_SNAKE_CASE__ : List[str] = check_and_map_params(
            self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = check_and_map_params(
            self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        SCREAMING_SNAKE_CASE__ : Optional[int] = check_and_map_params(
            self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        SCREAMING_SNAKE_CASE__ : Tuple = check_and_map_params(
            self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        SCREAMING_SNAKE_CASE__ : List[str] = layer.attention.output
        SCREAMING_SNAKE_CASE__ : Tuple = check_and_map_params(
            self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
        SCREAMING_SNAKE_CASE__ : Any = check_and_map_params(
            self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        SCREAMING_SNAKE_CASE__ : Optional[int] = layer.intermediate
        SCREAMING_SNAKE_CASE__ : List[str] = check_and_map_params(
            intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        SCREAMING_SNAKE_CASE__ : Tuple = check_and_map_params(
            intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        SCREAMING_SNAKE_CASE__ : List[str] = layer.output
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = check_and_map_params(
            bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        SCREAMING_SNAKE_CASE__ : str = check_and_map_params(
            bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-base""" )
    SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode_plus(_UpperCamelCase )["""input_ids"""]
    # Get gluon output
    SCREAMING_SNAKE_CASE__ : Dict = mx.nd.array([input_ids] )
    SCREAMING_SNAKE_CASE__ : Dict = original_bort(inputs=_UpperCamelCase , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(_UpperCamelCase )
    SCREAMING_SNAKE_CASE__ : List[Any] = BertModel.from_pretrained(_UpperCamelCase )
    hf_bort_model.eval()
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode_plus(_UpperCamelCase , return_tensors="""pt""" )
    SCREAMING_SNAKE_CASE__ : Any = hf_bort_model(**_UpperCamelCase )[0]
    SCREAMING_SNAKE_CASE__ : str = output_gluon[0].asnumpy()
    SCREAMING_SNAKE_CASE__ : Tuple = output_hf[0].detach().numpy()
    # Compare the two outputs element-wise at a 1e-3 absolute tolerance.
    SCREAMING_SNAKE_CASE__ : Any = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    SCREAMING_SNAKE_CASE__ : int = np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 )
    if success:
        print("""✔️ Both model do output the same tensors""" )
    else:
        print("""❌ Both model do **NOT** output the same tensors""" )
        print("""Absolute difference is:""" , _UpperCamelCase )
if __name__ == "__main__":
a :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a :Optional[Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 132
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    # Generic round-trip tester for a `config_class`: JSON (de)serialization,
    # save_pretrained/from_pretrained, common-property getters/setters, num_labels.
    # NOTE(review): `__init__` repeats the parameter name `__a` (a SyntaxError)
    # and the method bodies reference names (`parent`, `config_class`, `__a`,
    # `config`, ...) that are never bound -- the original identifiers appear to
    # have been mangled and must be restored before this class can run.
    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties
    def snake_case ( self ):
        # Check every "common property" exists, is settable, and is accepted by the constructor.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def snake_case ( self ):
        # JSON-string round trip must preserve every field passed to the constructor.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )
    def snake_case ( self ):
        # JSON-file round trip must yield an equal config.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # save_pretrained / from_pretrained round trip must yield an equal config.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # Same round trip, but saving/loading through a subfolder.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # num_labels drives the sizes of the id/label maps and can be changed later.
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )
    def snake_case ( self ):
        # A non-composite config must be constructible with no arguments at all.
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )
    def snake_case ( self ):
        # Every kwarg in config_common_kwargs must actually be applied by __init__.
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
    def snake_case ( self ):
        # Run the full battery of config checks.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Presumably seeds all RNGs / forces deterministic kernels so the pipeline
# tests below are reproducible -- see diffusers.utils.testing_utils; TODO confirm.
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = AltDiffusionPipeline
UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_a : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
_a : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_a : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
_a : Union[str, Any] = CLIPTextModel(__a )
_a : Optional[int] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_a : Dict = 77
_a : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str=0 ) -> str:
if str(__a ).startswith("""mps""" ):
_a : int = torch.manual_seed(__a )
else:
_a : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
_a : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _lowercase ( self : str ) -> Optional[Any]:
_a : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Optional[Any] = self.get_dummy_components()
torch.manual_seed(0 )
_a : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
_a : Any = RobertaSeriesModelWithTransformation(__a )
_a : List[Any] = text_encoder
_a : Optional[int] = AltDiffusionPipeline(**__a )
_a : Tuple = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
_a : List[str] = self.get_dummy_inputs(__a )
_a : Dict = """A photo of an astronaut"""
_a : int = alt_pipe(**__a )
_a : Union[str, Any] = output.images
_a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : Optional[Any] = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def _lowercase ( self : Tuple ) -> Optional[Any]:
        # Same smoke test as above, but with a PNDM scheduler (skip_prk_steps).
        # NOTE(review): same mangled-local problem — `_a` is assigned while
        # `__a`/`text_encoder`/`alt_pipe`/`output`/... are read undefined.
        _a : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
        _a : Optional[Any] = self.get_dummy_components()
        _a : Union[str, Any] = PNDMScheduler(skip_prk_steps=__a )
        torch.manual_seed(0 )
        _a : List[str] = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        _a : Any = RobertaSeriesModelWithTransformation(__a )
        _a : Dict = text_encoder
        _a : str = AltDiffusionPipeline(**__a )
        _a : Optional[Any] = alt_pipe.to(__a )
        alt_pipe.set_progress_bar_config(disable=__a )
        _a : Any = self.get_dummy_inputs(__a )
        _a : List[str] = alt_pipe(**__a )
        _a : List[str] = output.images
        _a : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _a : Optional[Any] = np.array(
            [0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
    """Slow GPU integration tests against the hosted BAAI/AltDiffusion weights.

    NOTE(review): all three methods share the mangled name ``_lowercase``, so
    only the last definition survives on the class, and the bodies read names
    (``__a``, ``alt_pipe``, ``prompt``, ``output``, ``image``, ...) that are
    never assigned — the original method/local names must be restored.
    """
    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _lowercase ( self : Dict ) -> int:
        # make sure here that pndm scheduler skips prk
        _a : List[str] = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__a )
        _a : Optional[Any] = alt_pipe.to(__a )
        alt_pipe.set_progress_bar_config(disable=__a )
        _a : Union[str, Any] = """A painting of a squirrel eating a burger"""
        _a : Union[str, Any] = torch.manual_seed(0 )
        _a : Dict = alt_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
        _a : Dict = output.images
        _a : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _a : Any = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
        # Variant using the DDIM scheduler loaded from the hub.
        _a : Optional[Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
        _a : str = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__a , safety_checker=__a )
        _a : int = alt_pipe.to(__a )
        alt_pipe.set_progress_bar_config(disable=__a )
        _a : Optional[int] = """A painting of a squirrel eating a burger"""
        _a : List[str] = torch.manual_seed(0 )
        _a : List[Any] = alt_pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="""numpy""" )
        _a : List[Any] = output.images
        _a : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _a : Tuple = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 294
|
"""simple docstring"""
# Dependency table: maps a package name to the pip requirement specifier the
# project pins it to. (Annotation corrected: this is a dict of strings, the
# original mangled annotation said `int`.)
A : dict = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase__ :
    """Mixin of save/load round-trip tests for feature-extraction classes.

    Subclasses presumably provide ``feature_extraction_class`` and
    ``feat_extract_dict`` — TODO confirm against users of this mixin; only
    the ``None`` placeholder below is visible here.

    NOTE(review): all four methods share the mangled name ``A_`` (only the
    last survives on the class) and bodies bind results to
    ``_lowerCamelCase`` while reading ``feat_extract``/``obj``/``__a``/
    ``feat_extract_first``/``feat_extract_second`` — the original local and
    method names must be restored before these tests can run.
    """
    # Placeholder; subclasses are expected to override this class attribute.
    lowerCamelCase__ = None
    def A_ ( self ):
        # JSON-string round trip: to_json_string -> json.loads must preserve
        # every configured key.
        _lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase : Tuple = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , __a )
    def A_ ( self ):
        # JSON-file round trip via to_json_file / from_json_file.
        _lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : Union[str, Any] = os.path.join(__a , 'feat_extract.json' )
            feat_extract_first.to_json_file(__a )
            _lowerCamelCase : List[str] = self.feature_extraction_class.from_json_file(__a )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def A_ ( self ):
        # save_pretrained / from_pretrained round trip; also validates the
        # emitted JSON file's format.
        _lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : str = feat_extract_first.save_pretrained(__a )[0]
            check_json_file_has_correct_format(__a )
            _lowerCamelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(__a )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def A_ ( self ):
        # Construction with no arguments must succeed.
        _lowerCamelCase : Dict = self.feature_extraction_class()
        self.assertIsNotNone(__a )
| 96
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Byte-level (UTF-8) tokenizer in the ByT5 style.

    Every UTF-8 byte is a token; pad/eos/unk specials occupy ids 0-2 and
    optional sentinel ("extra_id") tokens sit at the top of the vocabulary.

    NOTE(review): several signatures below declare the parameter ``__a``
    more than once — that is a SyntaxError in Python — and the bodies read
    descriptive names (``extra_ids``, ``pad_token``, ``token_ids``, ...)
    those signatures no longer bind. Method names were also mangled to a
    single ``snake_case``, so later definitions shadow earlier ones. The
    original names must be restored before this class can be imported.
    """
    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        # Wrap bare-string specials in AddedToken so strip behaviour is explicit.
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8 # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        for i, token in enumerate(__a ):
            # Sentinel tokens occupy the last `extra_ids` slots of the vocab.
            __lowerCAmelCase = self.vocab_size + i - n
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def snake_case ( self ):
        # Total vocab: 256 byte tokens + special tokens + sentinel tokens.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def snake_case ( self , __a , __a = None , __a = False ):
        # Mask marking special-token positions (1) vs regular tokens (0);
        # a trailing EOS is appended after each sequence.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
    def snake_case ( self , __a ):
        # Append EOS unless the sequence already ends with it (warn if so).
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def snake_case ( self , __a , __a = None ):
        # Token-type ids: ByT5 does not use them, so everything is 0.
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def snake_case ( self , __a , __a = None ):
        # Build model input: seq_a</s> or seq_a</s>seq_b</s>.
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a
    def snake_case ( self , __a ):
        # Tokenize: one token per UTF-8 byte.
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens
    def snake_case ( self , __a ):
        # token -> id, checking specials and added tokens first; multi-char
        # non-special tokens map to unk.
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id
    def snake_case ( self , __a ):
        # id -> token (inverse of the byte offset applied above).
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token
    def snake_case ( self , __a ):
        # Re-assemble a byte string from tokens, then decode as UTF-8,
        # ignoring invalid sequences.
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string
    def snake_case ( self , __a , __a = None ):
        # Byte-level tokenizer has no vocabulary file to save.
        return ()
| 57
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class lowercase ( lowerCAmelCase__ ):
    """Image-to-text pipeline: runs a vision-to-sequence model over images and
    decodes the generated captions.

    NOTE(review): the mangled method name ``a__`` is reused for what upstream
    are distinct methods (parameter sanitation / preprocess / _forward /
    postprocess), so later defs shadow earlier ones, and two signatures below
    declare the same parameter name twice (a SyntaxError). Bodies also read
    names (``prompt``, ``generate_kwargs``, ``forward_kwargs``, ``image``,
    ...) that those signatures no longer bind.
    """
    def __init__( self , *_a , **_a ) -> List[str]:
        super().__init__(*__a , **__a )
        requires_backends(self , """vision""" )
        # Restrict usable model classes to vision-2-seq for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def a__ ( self , _a=None , _a=None , _a=None ) -> List[str]:
        # Split call-time kwargs into preprocess vs forward parameters;
        # `max_new_tokens` may come either directly or inside generate_kwargs,
        # but not both.
        _A : int = {}
        _A : Optional[Any] = {}
        if prompt is not None:
            _A : Tuple = prompt
        if generate_kwargs is not None:
            _A : int = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                _A : Optional[int] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            _A : Tuple = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , _a , **_a ) -> Dict:
        return super().__call__(__a , **__a )
    def a__ ( self , _a , _a=None ) -> Tuple:
        # Preprocess: load the image and, depending on model type, combine it
        # with the optional conditional-generation prompt.
        _A : List[str] = load_image(__a )
        if prompt is not None:
            if not isinstance(__a , __a ):
                raise ValueError(
                    F'''Received an invalid text input, got - {type(__a )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            _A : List[str] = self.model.config.model_type
            if model_type == "git":
                _A : List[Any] = self.image_processor(images=__a , return_tensors=self.framework )
                _A : List[Any] = self.tokenizer(text=__a , add_special_tokens=__a ).input_ids
                _A : str = [self.tokenizer.cls_token_id] + input_ids
                _A : Optional[Any] = torch.tensor(__a ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                _A : int = self.image_processor(images=__a , header_text=__a , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                _A : Optional[Any] = self.image_processor(images=__a , return_tensors=self.framework )
                _A : Union[str, Any] = self.tokenizer(__a , return_tensors=self.framework )
                model_inputs.update(__a )
            else:
                raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
        else:
            _A : int = self.image_processor(images=__a , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            _A : Any = None
        return model_inputs
    def a__ ( self , _a , _a=None ) -> int:
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , __a )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            _A : Any = None
        if generate_kwargs is None:
            _A : Union[str, Any] = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        _A : Any = model_inputs.pop(self.model.main_input_name )
        _A : int = self.model.generate(__a , **__a , **__a )
        return model_outputs
    def a__ ( self , _a ) -> List[Any]:
        # Postprocess: decode each generated id sequence into text records.
        _A : Optional[int] = []
        for output_ids in model_outputs:
            _A : Dict = {
                """generated_text""": self.tokenizer.decode(
                    __a , skip_special_tokens=__a , )
            }
            records.append(__a )
        return records
| 26
|
"""simple docstring"""
import numpy
# List of input, output pairs
A : Any = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
A : Union[str, Any] = [2, 4, 1, 5]
A : int = len(train_data)
A : Dict = 0.009
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase="train" ):
'''simple docstring'''
return calculate_hypothesis_value(_UpperCamelCase , _UpperCamelCase ) - output(
_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
for i in range(len(_UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=m ):
'''simple docstring'''
__lowerCAmelCase = 0
for i in range(_UpperCamelCase ):
if index == -1:
summation_value += _error(_UpperCamelCase )
else:
summation_value += _error(_UpperCamelCase ) * train_data[i][0][index]
return summation_value
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = summation_of_cost_derivative(_UpperCamelCase , _UpperCamelCase ) / m
return cost_derivative_value
def _lowerCamelCase ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowerCAmelCase = 0.00_00_02
__lowerCAmelCase = 0
__lowerCAmelCase = 0
while True:
j += 1
__lowerCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(_UpperCamelCase ) ):
__lowerCAmelCase = get_cost_derivative(i - 1 )
__lowerCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_UpperCamelCase , _UpperCamelCase , atol=_UpperCamelCase , rtol=_UpperCamelCase , ):
break
__lowerCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
'''simple docstring'''
for i in range(len(_UpperCamelCase ) ):
print(("Actual output value:", output(_UpperCamelCase , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(_UpperCamelCase , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 57
| 0
|
from manim import *
class UpperCAmelCase ( lowerCAmelCase__ ):
    """Manim scene animating how a checkpoint's shards are loaded into an
    otherwise empty model (CPU/GPU/Model memory blocks).

    NOTE(review): results are bound to ``A_`` but the body reads mangled
    names (``__a``, ``cpu_left_col_base``, ``cpu_targs``, ``fill``,
    ``target``, ...) that are never assigned — the original local names must
    be restored before this scene can render.
    """
    def lowerCAmelCase_ ( self ):
        """Construct and play the loading animation."""
        # Unit memory cells: an outer rectangle and a slightly smaller fill.
        A_ : int = Rectangle(height=0.5 , width=0.5 )
        A_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of six cells.
        A_ : Tuple = [mem.copy() for i in range(6 )]
        A_ : Optional[Any] = [mem.copy() for i in range(6 )]
        A_ : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 )
        A_ : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 )
        A_ : List[str] = VGroup(__a , __a ).arrange(__a , buff=0 )
        A_ : Tuple = Text('CPU' , font_size=2_4 )
        A_ : Optional[int] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__a )
        # GPU block: one row of four cells.
        A_ : str = [mem.copy() for i in range(4 )]
        A_ : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 )
        A_ : Tuple = Text('GPU' , font_size=2_4 )
        A_ : int = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        gpu.move_to([-1, -1, 0] )
        self.add(__a )
        # Model block: one row of six cells.
        A_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        A_ : Tuple = VGroup(*__a ).arrange(__a , buff=0 )
        A_ : Optional[Any] = Text('Model' , font_size=2_4 )
        A_ : List[str] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        model.move_to([3, -1.0, 0] )
        self.add(__a )
        # Yellow targets marking where model weights land on the CPU.
        A_ : Optional[int] = []
        for i, rect in enumerate(__a ):
            rect.set_stroke(__a )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            A_ : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
            self.add(__a )
            cpu_targs.append(__a )
        # Checkpoint block: one row of six cells plus a label.
        A_ : Tuple = [mem.copy() for i in range(6 )]
        A_ : Any = VGroup(*__a ).arrange(__a , buff=0 )
        A_ : List[Any] = Text('Loaded Checkpoint' , font_size=2_4 )
        A_ : int = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend in the top-left corner.
        A_ : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        A_ : List[str] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__a , __a )
        A_ : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
        blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        A_ : Optional[Any] = MarkupText(
            F'''Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.''' , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__a ) , Write(__a ) )
        self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
        # Animate each checkpoint cell growing and then moving onto the CPU.
        A_ : Optional[int] = []
        A_ : Any = []
        for i, rect in enumerate(__a ):
            A_ : str = fill.copy().set_fill(__a , opacity=0.7 )
            target.move_to(__a )
            first_animations.append(GrowFromCenter(__a , run_time=1 ) )
            A_ : str = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
        self.play(*__a )
        self.play(*__a )
        self.wait()
| 140
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor: save/load round trips and agreement of
    the processor with its underlying tokenizer and image processor.

    NOTE(review): every method is named ``snake_case`` (only the last
    definition survives on the class) and bodies bind results to
    ``__lowerCAmelCase`` while reading descriptive names (``vocab_tokens``,
    ``processor``, ``image_input``, ...) — the original method and local
    names must be restored before these tests can run.
    """
    def snake_case ( self ):
        # Fixture setup: write a tiny vocab and an image-processor config
        # into a temporary directory.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )
    def snake_case ( self , **__a ):
        # Slow tokenizer built from the fixture vocab.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Fast tokenizer built from the fixture vocab.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Image processor built from the fixture config.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self ):
        # Fixture teardown.
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        # One random 30x400 RGB image as PIL input.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        # save_pretrained/from_pretrained round trip for both slow and fast
        # tokenizer variants; vocab and image-processor config must survive.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )
    def snake_case ( self ):
        # Extra kwargs passed at from_pretrained time must override the saved
        # tokenizer/image-processor configuration.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )
    def snake_case ( self ):
        # Processor image path must match the bare image processor's output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def snake_case ( self ):
        # Processor text path must match the bare tokenizer's output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def snake_case ( self ):
        # Text+image call must emit the combined key set, and a no-arg call
        # must raise.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()
    def snake_case ( self ):
        # batch_decode must simply forward to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )
    def snake_case ( self ):
        # Output keys must match the processor's declared model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 0
|
import os
def UpperCAmelCase ( ):
"""simple docstring"""
with open(os.path.dirname(_UpperCamelCase ) + '''/p022_names.txt''' ) as file:
__lowercase = str(file.readlines()[0] )
__lowercase = names.replace('''\"''' , '''''' ).split(''',''' )
names.sort()
__lowercase = 0
__lowercase = 0
for i, name in enumerate(_UpperCamelCase ):
for letter in name:
name_score += ord(_UpperCamelCase ) - 64
total_score += (i + 1) * name_score
__lowercase = 0
return total_score
if __name__ == "__main__":
print(solution())
| 210
|
"""simple docstring"""
from __future__ import annotations
# NOTE(review): in the mangled original every function was named
# `_lowerCamelCase` (each definition shadowing the previous one) and the
# demo at the bottom called undefined names (`make_matrix`, `rotate_aa`,
# `rotate_aaa` — the latter used for both 180 and 270 degrees). The
# intended names are restored; behaviour is otherwise unchanged.
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix counting up from 1 row by row.

    A non-positive or zero size falls back to the default of 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose as a new matrix."""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its rows in reverse order."""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed."""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table mapping submodule file -> public names it provides.
# NOTE(review): the mangled original bound every piece of this table to
# throwaway `_snake_case` names, left `_import_structure` undefined, and
# discarded the `_LazyModule` instance instead of installing it in
# `sys.modules` — this restores the standard transformers lazy-init pattern.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for the text2text-generation pipeline (PT and TF).

    NOTE(review): the first method's signature declares ``__a`` three times
    (a SyntaxError), all methods share the name ``snake_case`` (only the
    last survives on the class), and bodies read names (``generator``,
    ``outputs``, ``num_return_sequences``, ...) that are never assigned —
    the original names must be restored before these tests can run.
    """
    __UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def snake_case ( self , __a , __a , __a ):
        # Pipeline factory used by the shared pipeline test harness.
        __lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
        return generator, ["Something to write", "Something else"]
    def snake_case ( self , __a , __a ):
        # Generic output-shape checks shared across models.
        __lowerCAmelCase = generator("Something there" )
        self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        __lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        __lowerCAmelCase = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        with self.assertRaises(__a ):
            generator(4 )
    @require_torch
    def snake_case ( self ):
        # PyTorch small-model checks, including tensor returns and custom pad.
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
        __lowerCAmelCase = 3
        __lowerCAmelCase = generator(
            "Something there" , num_return_sequences=__a , num_beams=__a , )
        __lowerCAmelCase = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(__a , __a )
        __lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
        self.assertEqual(
            __a , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        __lowerCAmelCase = generator.model.config.eos_token_id
        __lowerCAmelCase = "<pad>"
        __lowerCAmelCase = generator(
            ["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
        self.assertEqual(
            __a , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def snake_case ( self ):
        # TensorFlow small-model smoke check.
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    """Tests for the ``zero-shot-audio-classification`` pipeline (CLAP-style models)."""

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
        # Use the last ESC-50 training clip as a fixed test waveform.
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF" )
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        # Batched input (implicit batching).
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        # Explicit batch_size must give identical results.
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF" )
    def test_large_model_tf(self):
        pass
| 55
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal LightningModule wrapper used only to load a QA checkpoint's state dict.

    Mirrors the training-time module layout (backbone + 2-label QA head) so
    ``load_state_dict`` on the checkpoint succeeds; it is never trained here.
    Named ``LightningModel`` because the conversion function below instantiates
    it under that name.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        # QA head: hidden_size -> (start_logit, end_logit)
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Required by LightningModule's interface; intentionally unused here.
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Convert a PyTorch Lightning Longformer-QA checkpoint to a HF checkpoint.

    Args:
        longformer_model: model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the Lightning .ckpt file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # load initial model + wrap it in the Lightning layout so the ckpt loads
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point: collect the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 0
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix of side ``abs(row_size)`` filled 1..n*n row-major.

    A zero size falls back to the default side of 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` rotated 90 degrees counterclockwise (new matrix)."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` rotated 180 degrees (new matrix)."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` rotated 270 degrees counterclockwise (new matrix)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of ``matrix``."""
    return [list(row) for row in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` with the row order reversed."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` with every row reversed."""
    return [row[::-1] for row in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Print each row of ``matrix`` on its own line, space-separated."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 343
|
"""simple docstring"""
def odd_even_sort(input_list):
    """Sort ``input_list`` in place with odd-even transposition (brick) sort.

    Alternately sweeps even-indexed and odd-indexed adjacent pairs, swapping
    out-of-order neighbours, until a full pass performs no swap. Returns the
    (mutated) list for convenience.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 0
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds tiny ViT configs/inputs and shape-checks the Flax ViT models."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a tiny ViTConfig plus a random pixel_values batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Shape-check the base model's last hidden state."""
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Shape-check classification logits, including the greyscale path."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common + model-specific tests for the Flax ViT models."""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT must not change the number or shapes of outputs.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/vit-base-patch16-224')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 199
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Bundle a CLIP image processor and an XLM-Roberta tokenizer into one processor.

    Text goes through the tokenizer, images through the image processor; when
    both are given, the pixel values are merged into the text encoding.
    """

    # Attribute names ProcessorMixin wires up from the class-level declarations below.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy argument name; accept it but warn.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images; at least one of them must be provided."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge pixel values into the text encoding so one dict feeds the model.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, deduplicated, order preserved.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.