"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])

            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


# We will verify the conversion on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
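# Example invocation (an illustrative sketch only: the script filename and the
# local paths below are placeholders, not part of the original file):
#
#     python convert_mobilevit_original_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit_s_hf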
"""simple docstring"""
def A ( __snake_case: int ) -> int:
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__magic_name__ = 1
__magic_name__ = 1
while repunit:
__magic_name__ = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def A ( __snake_case: int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
__magic_name__ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__snake_case ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i - 1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i - 1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i - 1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i - 1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i - 1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j + 1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j + 1}.", f"{model_prefix}encoder.layer.{i - 1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i - 1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our MobileViTV2 structure.
    """
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for the base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task the MobileViTV2 model you'd like to convert was trained on. "
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method, holding the encoded latents."""

    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    r"""A VQ-VAE model for encoding inputs into discrete latent representations and decoding them back."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
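# Usage sketch (an illustrative addition; assumes an installed `diffusers` build,
# since this module uses relative imports and cannot run standalone):
#
#     from diffusers import VQModel
#     import torch
#
#     model = VQModel()  # default config: a single 64-channel block
#     reconstruction = model(torch.randn(1, 3, 32, 32)).sample  # same shape as the input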
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Calculate the electron, hole, or intrinsic concentration of a semiconductor
    from the other two, using the mass action law n * p = n_i**2. Exactly one of
    the three arguments must be 0; that is the quantity being solved for.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
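    # Worked example (an illustrative addition, not in the original file): by the
    # mass action law n * p = n_i**2, so with n = 25 and p = 100 the intrinsic
    # concentration is sqrt(25 * 100) = 50.
    assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)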
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replace the key by subtracting the offset from the original block number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


# We will verify the conversion on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected, unweighted graph for running the Markov chain algorithm.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """
    Run the Markov chain for the given number of steps and count how many
    times each node is visited.
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
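    # Example run (an illustrative addition; exact counts vary because the
    # transitions are random). For this chain the stationary distribution
    # favours "a" 5:1, so counts["a"] should be roughly five times counts["b"].
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example_transitions, 5000))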
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    bd_path = bidir_a_star.search()  # actually run the search so the timing below measures it
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset. Multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset using multiple GPUs with accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))

    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
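# Example launch (an illustrative sketch: the flag names mirror the fields read
# from `args` above, but the exact HumanEvalArguments definitions live in
# arguments.py and the checkpoint id is a placeholder):
#
#     accelerate launch human_eval.py \
#         --model_ckpt codeparrot/codeparrot \
#         --n_samples 200 \
#         --batch_size 10 \
#         --HF_ALLOW_CODE_EVAL 1 \
#         --output_file eval_results.json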
"""
Image/Text processor class for OwlViT.
"""

import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Constructs an OwlViT processor which wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`]
    into a single processor that inherits both the image processor and tokenizer functionalities.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Prepares text queries, query images, and images for the model. At least one of `text`, `query_images`,
        or `images` must be supplied.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forwards all arguments to [`OwlViTImageProcessor.post_process`]."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards all arguments to [`OwlViTImageProcessor.post_process_object_detection`]."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards all arguments to [`OwlViTImageProcessor.post_process_image_guided_detection`]."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
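# Usage sketch (an illustrative addition; assumes an installed `transformers`
# build exporting OwlViTProcessor and the public "google/owlvit-base-patch32"
# checkpoint):
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")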
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
A : Union[str, Any] = None
A : int = logging.get_logger(__name__)
A : Any = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : str = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
A : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
A : Union[str, Any] = '▁'
class UpperCamelCase( _a ):
snake_case_ : Tuple = VOCAB_FILES_NAMES
snake_case_ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : int = BigBirdTokenizer
snake_case_ : List[str] = ["""input_ids""", """attention_mask"""]
snake_case_ : List[int] = []
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Dict="<unk>" , SCREAMING_SNAKE_CASE : Tuple="<s>" , SCREAMING_SNAKE_CASE : Tuple="</s>" , SCREAMING_SNAKE_CASE : Tuple="<pad>" , SCREAMING_SNAKE_CASE : Dict="[SEP]" , SCREAMING_SNAKE_CASE : str="[MASK]" , SCREAMING_SNAKE_CASE : Optional[int]="[CLS]" , **SCREAMING_SNAKE_CASE : Tuple , ) -> Optional[int]:
'''simple docstring'''
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else bos_token
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else eos_token
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else unk_token
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else pad_token
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cls_token
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
__snake_case = vocab_file
__snake_case = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
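# Hedged usage sketch: the three helpers above implement the standard
# [CLS] ... [SEP] scheme. Upstream this class is BigBirdTokenizerFast, so the
# calls below map onto build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences (checkpoint name is illustrative):
#
#     tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     tokenizer.build_inputs_with_special_tokens([10, 11])        # [cls, 10, 11, sep]
#     tokenizer.build_inputs_with_special_tokens([10], [20])      # [cls, 10, sep, 20, sep]
#     tokenizer.create_token_type_ids_from_sequences([10], [20])  # [0, 0, 0, 1, 1]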
| 701
|
import os
from datetime import datetime as dt
from github import Github
A : Union[str, Any] = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _lowerCAmelCase ( ) -> str:
'''simple docstring'''
__snake_case = Github(os.environ["GITHUB_TOKEN"] )
__snake_case = g.get_repo("huggingface/diffusers" )
__snake_case = repo.get_issues(state="open" )
for issue in open_issues:
__snake_case = sorted(issue.get_comments() , key=lambda comment : comment.created_at , reverse=True )
__snake_case = comments[0] if len(_lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
_lowerCAmelCase()
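# Timing sketch (illustrative, not part of the bot): an issue first gets the
# stale notice once it is 30+ days old and 23+ days without updates, and is
# closed after a further 7+ days of silence following the bot's comment. The
# day arithmetic works like this:
#
#     from datetime import datetime, timedelta
#     updated_at = datetime.utcnow() - timedelta(days=25)
#     (datetime.utcnow() - updated_at).days > 23   # True -> post the stale notice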
| 473
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class lowercase_ ( __UpperCAmelCase ):
def __init__( self , __A , __A , __A = None , __A = None , __A = False , **__A , ) -> List[Any]:
super().__init__(features=__A , cache_dir=__A , keep_in_memory=__A , **__A )
SCREAMING_SNAKE_CASE_ : List[Any] =Sql(
cache_dir=__A , features=__A , sql=__A , con=__A , **__A , )
def _snake_case ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Tuple =None
SCREAMING_SNAKE_CASE_ : Union[str, Any] =None
SCREAMING_SNAKE_CASE_ : Optional[Any] =None
SCREAMING_SNAKE_CASE_ : Any =None
self.builder.download_and_prepare(
download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , )
# Build dataset for splits
SCREAMING_SNAKE_CASE_ : Tuple =self.builder.as_dataset(
split='''train''' , verification_mode=__A , in_memory=self.keep_in_memory )
return dataset
class lowercase_ :
def __init__( self , __A , __A , __A , __A = None , __A = None , **__A , ) -> Optional[int]:
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
SCREAMING_SNAKE_CASE_ : List[Any] =dataset
SCREAMING_SNAKE_CASE_ : List[Any] =name
SCREAMING_SNAKE_CASE_ : Dict =con
SCREAMING_SNAKE_CASE_ : List[Any] =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE_ : Optional[Any] =num_proc
SCREAMING_SNAKE_CASE_ : Optional[Any] =to_sql_kwargs
def _snake_case ( self ) -> int:
SCREAMING_SNAKE_CASE_ : str =self.to_sql_kwargs.pop('''sql''' , __A )
SCREAMING_SNAKE_CASE_ : Any =self.to_sql_kwargs.pop('''con''' , __A )
SCREAMING_SNAKE_CASE_ : Tuple =self.to_sql_kwargs.pop('''index''' , __A )
SCREAMING_SNAKE_CASE_ : Dict =self._write(index=__A , **self.to_sql_kwargs )
return written
def _snake_case ( self , __A ) -> Dict:
SCREAMING_SNAKE_CASE_ : Tuple =args
SCREAMING_SNAKE_CASE_ : Any ={**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
SCREAMING_SNAKE_CASE_ : int =query_table(
table=self.dataset.data , key=slice(__A , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE_ : Tuple =batch.to_pandas()
SCREAMING_SNAKE_CASE_ : List[Any] =df.to_sql(self.name , self.con , index=__A , **__A )
return num_rows or len(__A )
def _snake_case ( self , __A , **__A ) -> int:
SCREAMING_SNAKE_CASE_ : Optional[Any] =0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
SCREAMING_SNAKE_CASE_ : str =len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __A , __A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
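# Hedged usage sketch (the sqlite file name is hypothetical): in the public
# `datasets` API, Dataset.to_sql is the entry point that delegates to the
# writer above:
#
#     import sqlite3
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b", "c"]})
#     con = sqlite3.connect("example.db")
#     ds.to_sql("my_table", con, batch_size=2)
#     con.close()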
| 443
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : List[str] = AudioLDMPipeline
__A : List[str] = TEXT_TO_AUDIO_PARAMS
__A : Tuple = TEXT_TO_AUDIO_BATCH_PARAMS
__A : List[str] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __lowercase ( self) -> int:
'''simple docstring'''
torch.manual_seed(0)
a__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowercase , )
a__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0)
a__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
a__ : Tuple = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
a__ : str = ClapTextModelWithProjection(lowercase)
a__ : Dict = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77)
a__ : Union[str, Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowercase , )
a__ : Dict = SpeechTaHifiGan(lowercase)
a__ : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def __lowercase ( self , lowercase , lowercase=0) -> Optional[int]:
'''simple docstring'''
if str(lowercase).startswith('mps'):
a__ : int = torch.manual_seed(lowercase)
else:
a__ : List[str] = torch.Generator(device=lowercase).manual_seed(lowercase)
a__ : Optional[int] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : List[Any] = self.get_dummy_components()
a__ : int = AudioLDMPipeline(**lowercase)
a__ : Optional[int] = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = self.get_dummy_inputs(lowercase)
a__ : Optional[int] = audioldm_pipe(**lowercase)
a__ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) == 256
a__ : List[Any] = audio[:10]
a__ : Tuple = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : str = self.get_dummy_components()
a__ : Tuple = AudioLDMPipeline(**lowercase)
a__ : Any = audioldm_pipe.to(lowercase)
a__ : Tuple = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = self.get_dummy_inputs(lowercase)
a__ : Dict = 3 * [inputs['prompt']]
# forward
a__ : Union[str, Any] = audioldm_pipe(**lowercase)
a__ : List[str] = output.audios[0]
a__ : List[str] = self.get_dummy_inputs(lowercase)
a__ : Tuple = 3 * [inputs.pop('prompt')]
a__ : Optional[Any] = audioldm_pipe.tokenizer(
lowercase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
a__ : str = text_inputs['input_ids'].to(lowercase)
a__ : Union[str, Any] = audioldm_pipe.text_encoder(
lowercase , )
a__ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
a__ : Any = F.normalize(lowercase , dim=-1)
a__ : Any = prompt_embeds
# forward
a__ : int = audioldm_pipe(**lowercase)
a__ : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Any = self.get_dummy_components()
a__ : Tuple = AudioLDMPipeline(**lowercase)
a__ : str = audioldm_pipe.to(lowercase)
a__ : Union[str, Any] = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : str = self.get_dummy_inputs(lowercase)
a__ : List[str] = 3 * ['this is a negative prompt']
a__ : Optional[int] = negative_prompt
a__ : Union[str, Any] = 3 * [inputs['prompt']]
# forward
a__ : Union[str, Any] = audioldm_pipe(**lowercase)
a__ : str = output.audios[0]
a__ : List[str] = self.get_dummy_inputs(lowercase)
a__ : int = 3 * [inputs.pop('prompt')]
a__ : Tuple = []
for p in [prompt, negative_prompt]:
a__ : Optional[int] = audioldm_pipe.tokenizer(
lowercase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
a__ : Optional[int] = text_inputs['input_ids'].to(lowercase)
a__ : List[str] = audioldm_pipe.text_encoder(
lowercase , )
a__ : int = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
a__ : Optional[Any] = F.normalize(lowercase , dim=-1)
embeds.append(lowercase)
a__ , a__ : Union[str, Any] = embeds
# forward
a__ : str = audioldm_pipe(**lowercase)
a__ : int = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : Optional[Any] = self.get_dummy_components()
a__ : List[Any] = PNDMScheduler(skip_prk_steps=lowercase)
a__ : Any = AudioLDMPipeline(**lowercase)
a__ : Any = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = self.get_dummy_inputs(lowercase)
a__ : Tuple = 'egg cracking'
a__ : Optional[int] = audioldm_pipe(**lowercase , negative_prompt=lowercase)
a__ : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) == 256
a__ : Optional[Any] = audio[:10]
a__ : List[str] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : List[str] = self.get_dummy_components()
a__ : Optional[int] = PNDMScheduler(skip_prk_steps=lowercase)
a__ : Optional[Any] = AudioLDMPipeline(**lowercase)
a__ : Dict = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : int = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
a__ : Dict = audioldm_pipe(lowercase , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
a__ : Union[str, Any] = 2
a__ : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
a__ : Tuple = 2
a__ : int = audioldm_pipe(lowercase , num_inference_steps=2 , num_waveforms_per_prompt=lowercase).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
a__ : Dict = 2
a__ : int = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowercase).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : List[str] = self.get_dummy_components()
a__ : List[Any] = AudioLDMPipeline(**lowercase)
a__ : str = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = audioldm_pipe.vocoder.config.sampling_rate
a__ : Union[str, Any] = self.get_dummy_inputs(lowercase)
a__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.0_16 , **lowercase)
a__ : int = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) / vocoder_sampling_rate == 0.0_16
a__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.0_32 , **lowercase)
a__ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) / vocoder_sampling_rate == 0.0_32
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.get_dummy_components()
a__ : Optional[Any] = AudioLDMPipeline(**lowercase)
a__ : Dict = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Tuple = ['hey']
a__ : Dict = audioldm_pipe(lowercase , num_inference_steps=1)
a__ : Union[str, Any] = output.audios.shape
assert audio_shape == (1, 256)
a__ : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
a__ : str = SpeechTaHifiGan(lowercase).to(lowercase)
a__ : Union[str, Any] = audioldm_pipe(lowercase , num_inference_steps=1)
a__ : List[str] = output.audios.shape
# the waveform shape is unchanged; we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __lowercase ( self) -> Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase)
def __lowercase ( self) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowercase)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowercase ( self) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase)
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = torch.Generator(device=lowercase).manual_seed(lowercase)
a__ : str = np.random.RandomState(lowercase).standard_normal((1, 8, 128, 16))
a__ : Union[str, Any] = torch.from_numpy(lowercase).to(device=lowercase , dtype=lowercase)
a__ : Optional[Any] = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
a__ : Tuple = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = self.get_inputs(lowercase)
a__ : Any = 25
a__ : str = audioldm_pipe(**lowercase).audios[0]
assert audio.ndim == 1
assert len(lowercase) == 8_1920
a__ : List[str] = audio[7_7230:7_7240]
a__ : Union[str, Any] = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15])
a__ : Union[str, Any] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-2
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
a__ : Dict = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
a__ : Union[str, Any] = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = self.get_inputs(lowercase)
a__ : Any = audioldm_pipe(**lowercase).audios[0]
assert audio.ndim == 1
assert len(lowercase) == 8_1920
a__ : Optional[Any] = audio[2_7780:2_7790]
a__ : Any = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12])
a__ : Optional[Any] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3e-2
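# Hedged end-to-end sketch mirroring the slow tests above (requires the
# cvssp/audioldm weights and a CUDA device):
#
#     from diffusers import AudioLDMPipeline
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
#     audio = pipe(
#         "A hammer hitting a wooden surface",
#         num_inference_steps=25,
#         guidance_scale=2.5,
#     ).audios[0]   # 1-D waveform at pipe.vocoder.config.sampling_rate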
| 302
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
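# Minimal sketch of the same lazy-module pattern for a hypothetical package:
# the heavy imports only happen when an exported symbol is first accessed.
#
#     import sys
#     from transformers.utils import _LazyModule
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)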
| 709
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class _lowercase ( __UpperCAmelCase ):
_lowerCamelCase = ['''pixel_values''']
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = 32 , UpperCamelCase_=PILImageResampling.BILINEAR , UpperCamelCase_ = True , **UpperCamelCase_ , ):
__magic_name__ = do_resize
__magic_name__ = do_rescale
__magic_name__ = size_divisor
__magic_name__ = resample
super().__init__(**UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ):
__magic_name__ , __magic_name__ = get_image_size(UpperCamelCase_ )
# Rounds the height and width down to the closest multiple of size_divisor
__magic_name__ = height // size_divisor * size_divisor
__magic_name__ = width // size_divisor * size_divisor
__magic_name__ = resize(UpperCamelCase_ , (new_h, new_w) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
return image
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ):
return rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ = size_divisor if size_divisor is not None else self.size_divisor
__magic_name__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
__magic_name__ = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
__magic_name__ = [to_numpy_array(UpperCamelCase_ ) for img in images]
if do_resize:
__magic_name__ = [self.resize(UpperCamelCase_ , size_divisor=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
__magic_name__ = [self.rescale(UpperCamelCase_ , scale=1 / 255 ) for image in images]
__magic_name__ = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__magic_name__ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
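# Hedged usage sketch (sizes are illustrative): with size_divisor=32, a 65x97
# image is floored to 64x96, rescaled by 1/255, and returned channels-first.
# `GLPNImageProcessor` stands in for the obfuscated class name above:
#
#     import numpy as np
#     from PIL import Image
#     processor = GLPNImageProcessor(do_resize=True, size_divisor=32)
#     image = Image.fromarray(np.zeros((65, 97, 3), dtype=np.uint8))
#     batch = processor(image, return_tensors="np")
#     batch["pixel_values"].shape   # (1, 3, 64, 96)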
| 190
| 0
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__A = random.Random()
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: int=1.0 , _lowerCamelCase: List[Any]=None , _lowerCamelCase: int=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
__lowerCamelCase : Tuple = global_rng
__lowerCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Any=7 , UpperCAmelCase : Dict=400 , UpperCAmelCase : Union[str, Any]=2000 , UpperCAmelCase : List[str]=2048 , UpperCAmelCase : Dict=128 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Union[str, Any]=30 , UpperCAmelCase : Tuple=44100 , ):
__lowerCamelCase : int = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : str = min_seq_length
__lowerCamelCase : int = max_seq_length
__lowerCamelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCamelCase : int = spectrogram_length
__lowerCamelCase : Optional[Any] = feature_size
__lowerCamelCase : Union[str, Any] = num_audio_channels
__lowerCamelCase : Union[str, Any] = hop_length
__lowerCamelCase : Union[str, Any] = chunk_length
__lowerCamelCase : Any = sampling_rate
def lowerCamelCase__ ( self : Any ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Tuple=False ):
def _flatten(UpperCAmelCase : Union[str, Any] ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
__lowerCamelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCamelCase : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCamelCase : Dict = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = TvltFeatureExtractor
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : int = TvltFeatureExtractionTester(self )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCAmelCase , "spectrogram_length" ) )
self.assertTrue(hasattr(__lowerCAmelCase , "feature_size" ) )
self.assertTrue(hasattr(__lowerCAmelCase , "num_audio_channels" ) )
self.assertTrue(hasattr(__lowerCAmelCase , "hop_length" ) )
self.assertTrue(hasattr(__lowerCAmelCase , "chunk_length" ) )
self.assertTrue(hasattr(__lowerCAmelCase , "sampling_rate" ) )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : str = feat_extract_first.save_pretrained(__lowerCAmelCase )[0]
check_json_file_has_correct_format(__lowerCAmelCase )
__lowerCamelCase : int = self.feature_extraction_class.from_pretrained(__lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = feat_extract_first.to_dict()
__lowerCamelCase : Optional[Any] = feat_extract_second.to_dict()
__lowerCamelCase : Optional[int] = dict_first.pop("mel_filters" )
__lowerCamelCase : Any = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : str = os.path.join(__lowerCAmelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = self.feature_extraction_class.from_json_file(__lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = feat_extract_first.to_dict()
__lowerCamelCase : Tuple = feat_extract_second.to_dict()
__lowerCamelCase : Union[str, Any] = dict_first.pop("mel_filters" )
__lowerCamelCase : int = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : Any = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__lowerCamelCase : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__lowerCamelCase : str = feature_extractor(__lowerCAmelCase , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__lowerCamelCase : Dict = feature_extractor(
__lowerCAmelCase , return_tensors="np" , sampling_rate=44100 , mask_audio=__lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCamelCase : Optional[int] = np.asarray(__lowerCAmelCase )
__lowerCamelCase : str = feature_extractor(__lowerCAmelCase , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Any ):
__lowerCamelCase : List[str] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__lowerCamelCase : List[Any] = ds.sort("id" ).select(range(__lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Optional[Any] = self._load_datasamples(1 )
__lowerCamelCase : Any = TvltFeatureExtractor()
__lowerCamelCase : Optional[Any] = feature_extractor(__lowerCAmelCase , return_tensors="pt" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__lowerCamelCase : Tuple = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowerCAmelCase , atol=1E-4 ) )
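# Hedged usage sketch (a synthetic 1-second mono waveform at 44.1 kHz; shapes
# follow the assertions above):
#
#     import numpy as np
#     fe = TvltFeatureExtractor()
#     waveform = np.random.randn(44_100).astype(np.float32)
#     out = fe(waveform, sampling_rate=44_100, return_tensors="np")
#     out["audio_values"].shape   # (1, num_audio_channels, <=spectrogram_length, feature_size)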
| 646
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_a = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = torch.load(__snake_case ,map_location='''cpu''' )
return sd
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=rename_keys_prefix ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = OrderedDict()
lowerCamelCase__ = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCamelCase__ = key
for name_pair in rename_keys_prefix:
lowerCamelCase__ = new_key.replace(name_pair[0] ,name_pair[1] )
lowerCamelCase__ = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it was added separately
lowerCamelCase__ = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
lowerCamelCase__ = '''pretraining'''
if "vcr" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 512}
elif "vqa_advanced" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 2048}
elif "vqa" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 2048}
elif "nlvr" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 512}
lowerCamelCase__ = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 2048}
lowerCamelCase__ = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
lowerCamelCase__ = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
lowerCamelCase__ = '''vqa'''
elif "nlvr" in checkpoint_path:
lowerCamelCase__ = {
'''visual_embedding_dim''': 1024,
'''num_labels''': 2,
}
lowerCamelCase__ = '''nlvr'''
lowerCamelCase__ = VisualBertConfig(**__snake_case )
# Load State Dict
lowerCamelCase__ = load_state_dict(__snake_case )
lowerCamelCase__ = get_new_dict(__snake_case ,__snake_case )
if model_type == "pretraining":
lowerCamelCase__ = VisualBertForPreTraining(__snake_case )
elif model_type == "vqa":
lowerCamelCase__ = VisualBertForQuestionAnswering(__snake_case )
elif model_type == "nlvr":
lowerCamelCase__ = VisualBertForVisualReasoning(__snake_case )
elif model_type == "multichoice":
lowerCamelCase__ = VisualBertForMultipleChoice(__snake_case )
model.load_state_dict(__snake_case )
# Save Checkpoints
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
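# Hedged CLI sketch (script filename and local paths are illustrative; the
# checkpoint's basename must appear in ACCEPTABLE_CHECKPOINTS):
#
#     python convert_visual_bert_checkpoint.py \
#         /checkpoints/vqa_fine_tuned.th ./visual_bert_vqa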
| 481
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> str:
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase_ = """"""
else:
lowercase_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[
: config.hidden_size, :
]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = dct.pop(__lowerCAmelCase )
lowercase_ = val
def _SCREAMING_SNAKE_CASE () -> Any:
'''simple docstring'''
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ) -> Optional[int]:
'''simple docstring'''
lowercase_ = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowercase_ = 8
# set labels if required
if not base_model:
lowercase_ = 10_00
lowercase_ = """huggingface/label-files"""
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowercase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowercase_ = 3_84
lowercase_ = 15_36
lowercase_ = 12
lowercase_ = 6
# load original model from torch hub
lowercase_ = torch.hub.load("""facebookresearch/dino:main""" , __lowerCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase_ = original_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
lowercase_ = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if base_model:
lowercase_ = ViTModel(__lowerCAmelCase , add_pooling_layer=__lowerCAmelCase ).eval()
else:
lowercase_ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
lowercase_ = ViTImageProcessor()
lowercase_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase_ = encoding["""pixel_values"""]
lowercase_ = model(__lowerCAmelCase )
if base_model:
lowercase_ = original_model(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
lowercase_ = original_model(__lowerCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
UpperCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
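# Hedged CLI sketch (script filename is illustrative; the run fetches the DINO
# weights from torch hub and verifies outputs before saving):
#
#     python convert_dino_to_vit.py --model_name dino_vits8 \
#         --pytorch_dump_folder_path ./dino_vits8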
| 720
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCAmelCase : int = 5_0003
UpperCAmelCase : Optional[Any] = 5_0002
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = PLBartTokenizer
lowercase__ = None
lowercase__ = False
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_)
tokenizer.save_pretrained(self.tmpdirname)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_)
lowercase_ = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase_)
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase_)
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowercase_ = tokenizer.vocab_size
lowercase_ = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4 , end)]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""])
lowercase_ = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowercase_ = tokenizer(lowerCAmelCase_).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_) , lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_)
lowercase_ = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase_)
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase_)
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowercase_ = tokenizer.vocab_size
lowercase_ = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7 , end)]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""])
lowercase_ = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowercase_ = tokenizer(lowerCAmelCase_).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = "uclanlp/plbart-python-en_XX"
lowercase__ = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
lowercase__ = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
lowercase__ = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _UpperCAmelCase ( cls : List[Any]):
"""simple docstring"""
lowercase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""")
lowercase_ = 1
return cls
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_0_0_0_1)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_0_0_0_2)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_0_0_0_3)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids)
lowercase_ = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
lowercase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0]
self.assertIsInstance(src_text[0] , lowerCAmelCase_)
lowercase_ = 1_0
lowercase_ = self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , lowerCAmelCase_)
self.assertEqual(len(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""]) , [5_0_0_0_4, 5_0_0_0_1])
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = tempfile.mkdtemp()
lowercase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_)
lowercase_ = PLBartTokenizer.from_pretrained(lowerCAmelCase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_)
@require_torch
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""")
lowercase_ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="""pt""" , )
lowercase_ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 2_6) , batch.input_ids.shape)
self.assertEqual((2, 2_6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""")
lowercase_ = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1_0 , return_tensors="""pt""")
lowercase_ = targets["""input_ids"""]
lowercase_ = shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)
@require_torch
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""")
self.assertEqual(
nested_simplify(lowerCAmelCase_) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_0_0_0_1,
} , )
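# Hedged usage sketch mirroring the translation tests above (weights download
# from the hub on first use):
#
#     tok = PLBartTokenizer.from_pretrained(
#         "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#     )
#     batch = tok("def f(): pass", text_target="Does nothing.", return_tensors="pt")
#     batch["input_ids"][0, -1].item()   # the __python__ language code (50002)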
| 100
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: str = logging.get_logger(__name__)
_lowercase: List[Any] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ ="vivit"
def __init__( self : List[str] , lowercase__ : Union[str, Any]=2_24 , lowercase__ : Any=32 , lowercase__ : Any=[2, 16, 16] , lowercase__ : List[Any]=3 , lowercase__ : Any=7_68 , lowercase__ : Union[str, Any]=12 , lowercase__ : List[Any]=12 , lowercase__ : str=30_72 , lowercase__ : Optional[int]="gelu_fast" , lowercase__ : int=0.0 , lowercase__ : int=0.0 , lowercase__ : str=0.0_2 , lowercase__ : str=1e-06 , lowercase__ : Any=True , **lowercase__ : int , ):
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = image_size
_lowerCAmelCase = num_frames
_lowerCAmelCase = tubelet_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = qkv_bias
super().__init__(**lowercase__ )
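# Hedged usage sketch: upstream this class is VivitConfig; the defaults above
# describe the ViViT-B 16x2 layout, so only overrides need to be passed:
#
#     config = VivitConfig(num_frames=64)
#     (config.hidden_size, config.tubelet_size)   # (768, [2, 16, 16])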
| 192
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowercase: str = '''sshleifer/bart-tiny-random'''
_lowercase: Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 192
| 1
|
"""simple docstring"""
from math import factorial
def solution(n: int = 100) -> int:
    """Return the sum of the digits of n! (Project Euler 20)."""
    return sum(int(digit) for digit in str(factorial(n)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 65
|
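Quick self-check of `solution` above: 10! = 3628800, whose digits sum to 27, and the documented Project Euler 20 answer for 100! is 648.

assert solution(10) == 27   # 3 + 6 + 2 + 8 + 8 + 0 + 0
assert solution(100) == 648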
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 65
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 470
|
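A stripped-down sketch of the lazy-import pattern used above. `LazyModule` here is an illustrative stand-in for the library's internal `_LazyModule`, not its real implementation: the first attribute access imports the defining submodule and caches the result on the package object.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_submodule[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module(f".{submodule}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip the import machinery
        return value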
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" ,[
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowercase ,i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def test_distribute_shards(kwargs, expected):
    """simple docstring"""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" ,[
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] ,)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" ,[
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] ,)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 277
| 0
|
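A self-contained sketch of the contiguous round-robin split the `_distribute_shards` tests above encode; `distribute_shards` is an illustrative reimplementation, not the datasets internal.

def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    # at most max_num_jobs contiguous ranges; the first (num_shards % num_jobs)
    # jobs receive one extra shard
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    base, extra = divmod(num_shards, num_jobs)
    ranges, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        ranges.append(range(start, start + size))
        start += size
    return ranges


assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(0, 1) == []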
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
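Usage examples for `ohms_law` above: exactly one quantity is passed as 0 and is solved from the other two.

print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}
print(ohms_law(voltage=12, current=4, resistance=0))  # {'resistance': 3.0}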
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    """No of Comparisons for 100 elements selected from a standard normal distribution """
    """is :"""
)
print(z)
| 355
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase__ : Union[str, Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase__ : Tuple = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
UpperCAmelCase__ : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        """simple docstring"""
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 48
|
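A minimal check of the reference transposition `_compute` performs above, run against sacrebleu directly (assumes sacrebleu>=1.4.12 is installed); the expected score comes from Example 2 in the metric's docstring.

from sacrebleu import TER

predictions = ["does this sentence match??", "what about this sentence?"]
refs_per_prediction = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
# transpose per-prediction references into per-stream references
streams = [[refs[i] for refs in refs_per_prediction] for i in range(2)]
result = TER(case_sensitive=True).corpus_score(predictions, streams)
print(result.score)  # 62.5 per Example 2 above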
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 143
| 0
|
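Sanity checks for the QA metric helpers restored above.

assert normalize_answer("The Cat sat!") == "cat sat"
assert exact_match_score("The cat.", "cat") is True
print(f1_score("the cat sat", "a cat sat down"))  # 0.8 (precision 1.0, recall 2/3)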
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 258
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 258
| 1
|
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    '''simple docstring'''
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(x) for x in a))
if __name__ == "__main__":
main()
| 265
|
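Demo of `pigeonhole_sort` above; it sorts in place and suits integer inputs whose value range is small relative to their length.

nums = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(nums)
assert nums == [2, 3, 4, 6, 7, 8, 8]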
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265
| 1
|
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """simple docstring"""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """simple docstring"""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """simple docstring"""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __snake_case ( UpperCamelCase , UpperCamelCase = "student" , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
a__ = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase , UpperCamelCase ):
AutoTokenizer.from_pretrained(UpperCamelCase ).save_pretrained(UpperCamelCase ) # purely for convenience
a__ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ).eval()
else:
assert isinstance(UpperCamelCase , UpperCamelCase ), f"teacher must be a model or string got type {type(UpperCamelCase )}"
a__ = teacher.config.to_diff_dict()
try:
a__ , a__ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
a__ = teacher_e
if d is None:
a__ = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
a__ , a__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
a__ , a__ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
a__ = teacher_e
if d is None:
a__ = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase )
# Copy weights
a__ = teacher.config_class(**UpperCamelCase )
a__ = AutoModelForSeqaSeqLM.from_config(UpperCamelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
a__ = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
a__ , a__ = list(range(UpperCamelCase ) ), list(range(UpperCamelCase ) )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
f" {save_path}" )
student.save_pretrained(UpperCamelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
a__ = pick_layers_to_copy(UpperCamelCase , UpperCamelCase )
if d_layers_to_copy is None:
a__ = pick_layers_to_copy(UpperCamelCase , UpperCamelCase )
try:
if hasattr(
UpperCamelCase , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
a__ = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 714
|
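A hypothetical invocation of `create_student_by_copying_alternating_layers` above (the tiny checkpoint name is reused from the tests elsewhere in this dump): build a 1-encoder / 1-decoder student from a tiny BART teacher.

student, copied_enc, copied_dec = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random", "student_dir", e=1, d=1
)
print(copied_enc, copied_dec)  # e.g. [0], [0] -- the first layer of each stack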
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """simple docstring"""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """simple docstring"""
    # pick a pivot and separate into lists based on pivot
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158
| 0
|
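Quickselect demo for `kth_number` above. The partition keeps only elements strictly smaller or larger than the pivot, so the input is assumed to hold distinct values.

assert kth_number([3, 1, 4, 5, 9, 2, 6], 1) == 1
assert kth_number([3, 1, 4, 5, 9, 2, 6], 3) == 3
assert kth_number([3, 1, 4, 5, 9, 2, 6], 7) == 9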
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs) -> None:
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 185
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 185
| 1
|
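Demo of the `signature` helper above: words sharing a sorted-letter signature are anagrams of one another.

assert signature("listen") == signature("silent") == "eilnst"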
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """simple docstring"""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """simple docstring"""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
A : List[Any] = int(input("Expected value: "))
A : List[str] = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 5
|
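Behavioural check of `forward_propagation` above: with enough propagations the output converges near the expected value (the bounds mirror the function's intent, not an exact guarantee).

result = forward_propagation(32, 450_000)
print(result)  # typically between 31 and 33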
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," )]
for line in input_file.readlines()
]
lowerCamelCase__ : Optional[Any] = len(_A )
lowerCamelCase__ : Union[str, Any] = len(matrix[0] )
lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )]
for i in range(_A ):
lowerCamelCase__ : Optional[Any] = matrix[i][0]
for j in range(1 , _A ):
for i in range(_A ):
lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _A ):
lowerCamelCase__ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCamelCase__ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
| 5
| 1
|
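Context for `solution` above (Project Euler 82): the documented 5x5 example grid has a minimal left-to-right path sum of 994, via 201 -> 96 -> 342 -> 234 -> 103 -> 18 (moving up, down and right only). A hypothetical check that writes that grid next to the module:

from pathlib import Path

EXAMPLE_GRID = """131,673,234,103,18
201,96,342,965,150
630,803,746,422,111
537,699,497,121,956
805,732,524,37,331"""

Path(__file__).with_name("euler82_example.txt").write_text(EXAMPLE_GRID)
assert solution("euler82_example.txt") == 994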
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        '''simple docstring'''
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
    def test_small_model_tf(self):
pass
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case : str =logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
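# Minimal usage sketch (illustrative, not part of the original file; assumes the
# CLIP-style class name restored above and that Pillow is installed):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (256, 256)), return_tensors="np")
#   assert batch["pixel_values"].shape == (1, 3, 224, 224)  # shortest-edge resize + 224x224 center crop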
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    '''simple docstring'''
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
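# Why these multipliers (derivation, not in the original source): concatenating a 4-digit
# base b with 2*b (a 5-digit number) yields b * 100000 + 2*b = b * 100002, and concatenating
# a 3-digit base b with 2*b and 3*b yields b * 1002003. For example, 192 * 1002003 = 192384576,
# i.e. 192 | 384 | 576. Searching downward returns the largest pandigital product first.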
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a__: str = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setUp(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)
        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)
        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
def kth_permutation(k: int, n: int) -> list:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
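# Worked example (illustrative, not from the source): for n = 3 the permutations of
# range(3) in lexicographic order are 012, 021, 102, 120, 201, 210, so index 4 yields
# [2, 0, 1]: factorials = [1, 2]; divmod(4, 2) picks element 2; divmod(0, 1) picks 0;
# the last remaining element is 1.
# assert kth_permutation(4, 3) == [2, 0, 1]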
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None):
    """simple docstring"""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
def odd_even_transposition(arr: list):
    """simple docstring"""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
lowerCamelCase__ = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree), )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
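# Math note (standard Bernstein form, not stated in the source): the curve evaluated
# above is B(t) = sum_{i=0..n} C(n, i) * (1 - t)**(n - i) * t**i * P_i for control points
# P_i, degree n, and t in [0, 1]; basis_function returns the Bernstein coefficients,
# which always sum to 1 (hence the assertion in the code).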
'''simple docstring'''
from __future__ import annotations
_SCREAMING_SNAKE_CASE = list[list[int]]
# assigning initial values to the grid
_SCREAMING_SNAKE_CASE = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
_SCREAMING_SNAKE_CASE = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
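# Backtracking sketch (descriptive comment, not from the source): ``sudoku`` finds the
# first empty cell, tries each digit 1-9 that passes ``is_safe``, recurses, and resets
# the cell to 0 whenever a branch dead-ends, so a failed search leaves the grid unchanged.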
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
_SCREAMING_SNAKE_CASE = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
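# Example output shape (illustrative, not from the source): with max_stories=2 the
# markdown helper returns two lines such as
# "* [Some story title](https://example.com/article)", one bullet per top story.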
"""simple docstring"""
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
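# Example (illustrative, not from the source): is_isogram("Uncopyrightable") is True
# because no letter repeats, while is_isogram("allowed") is False because "l" repeats.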
if __name__ == "__main__":
_snake_case : Optional[Any] = input('Enter a string ').strip()
_snake_case : Any = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), )
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__a :int = '\\n\n'
__a :Any = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__a :List[str] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    '''simple docstring'''
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    '''simple docstring'''
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
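# Round-trip sketch (illustrative, not from the source): base16_encode(b"Hello") returns
# "48656C6C6F", and base16_decode("48656C6C6F") returns b"Hello" again.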
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
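# Note (inferred from the fields above, not stated in the source): "ratio_char_token"
# stores characters per token, a rough measure of how densely the tokenizer compresses
# the source files being pretokenized.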
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip("Skip this test while all models are still to be uploaded." )
    def test_pretrained_model_lists(self):
pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(back_tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
# fmt: off
A_ = {"input_ids": [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A_,  # the expected-encoding dict defined above (name kept from the source)
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        input_ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, input_ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
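# Illustrative usage of the tokenizer exercised above (a minimal sketch, not part of
# the original test file; it assumes the `facebook/m2m100_418M` checkpoint is reachable):
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    batch = tok(["In my opinion, there are two levels of response."], return_tensors="pt")
    # The first input id is the source language code, the last one is </s> (id 2).
    print(batch["input_ids"][0][0].item(), batch["input_ids"][0][-1].item())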
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
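# Quick interactive sketch of the same tool outside the test harness (illustrative;
# `load_tool` fetches the default text-classification tool from the Hub):
if __name__ == "__main__":
    from transformers import load_tool

    classifier = load_tool("text-classification")
    classifier.setup()
    print(classifier("That's quite cool", ["positive", "negative"]))  # -> "positive"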
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
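# Minimal sketch of instantiating the config defined above (illustrative only):
if __name__ == "__main__":
    config = RetriBertConfig(projection_dim=256)
    print(config.model_type, config.hidden_size, config.projection_dim)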
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
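# Sketch of how this formatter is reached in practice (illustrative; requires the
# `datasets` and `jax` packages at runtime):
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    print(type(ds[0]["x"]))  # a jax.Array produced by JaxFormatter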
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
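# Vectorized alternative (a sketch): because OpenCV images are uint8 numpy arrays,
# the same negative can be computed with broadcasting instead of the nested loops:
def convert_to_negative_fast(img):
    return 255 - img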
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
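# Minimal end-to-end sketch of the pipeline under test (illustrative; it downloads
# the full Stable Diffusion weights, so it is documentation rather than a fast test):
if __name__ == "__main__":
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    image = pipe("a photo of an astronaut", sag_scale=0.75, num_inference_steps=20).images[0]
    image.save("sag_sample.png")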
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initalize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
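# Example invocation of this script (illustrative; `beans` is a small public
# image-classification dataset on the Hub):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --do_train --do_eval \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 8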
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
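# Illustrative effect of the lazy module above: importing a symbol only loads its
# submodule on first access (a sketch, assuming transformers is installed):
if __name__ == "__main__":
    from transformers.models.funnel import FunnelConfig

    print(FunnelConfig().model_type)  # "funnel"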
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
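# Minimal sketch of instantiating the config defined above (illustrative only):
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(config.d_model, config.attention_type)  # 64 "prob"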
class MaxFenwickTree:
    """
    Fenwick (binary indexed) tree supporting point updates and range maximum queries.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # this node covers only `index`, so it stores the value itself
                self.tree[index] = value
            else:
                # note: assumes values are only ever increased
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
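# Usage sketch for the tree above (illustrative; updates must not decrease values
# for the cached range maxima to stay exact):
if __name__ == "__main__":
    fenwick = MaxFenwickTree(5)
    fenwick.update(2, 7)
    fenwick.update(4, 3)
    print(fenwick.query(0, 5))  # 7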
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
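# Sketch of composing a RAG config from its two sub-configs (illustrative):
if __name__ == "__main__":
    from transformers import BartConfig, DPRConfig, RagConfig

    rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
    print(rag_config.model_type)  # "rag"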
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
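# Sketch of how this command plugs into the transformers CLI (illustrative; the
# `register_subcommand` hook above is what makes a shell invocation like this work):
#
#   transformers-cli train --train_data ./train.csv --model bert-base-uncased --output ./out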
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
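# Usage sketch for the helpers above (illustrative; writes a small Arrow file):
if __name__ == "__main__":
    features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("int32")})
    ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
    print(len(ds), ds.features)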
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Optional[int]:
__a = True
__a = flatten_dict(modela.params )
__a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__a = False
return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 695
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # is_decoder=False and use_stable_embedding=True reconstruct the values that
        # were elided in this copy of the file
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 707
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize a single image, a list of frames, or a batch of videos into List[List[image]]."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        # parenthesized to fix the original precedence bug (`and` binds tighter than `or`)
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
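# Illustrative usage sketch (not part of the original file), assuming the class
# name defined above; the 16-frame clip is a hypothetical stand-in for real video:
#
#     >>> import numpy as np
#     >>> processor = VideoMAEImageProcessor()
#     >>> clip = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#     >>> inputs = processor(clip, return_tensors="np")
#     >>> inputs["pixel_values"].shape  # (batch, frames, channels, height, width)
#     (1, 16, 3, 224, 224)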
| 256
| 0
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 62
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # both state dicts list parameters in the same order, so keys map positionally
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
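# Migration sketch (not part of the original file): the deprecated class is a thin
# alias of the image processor, so switching is a one-line change.
#
#     >>> extractor = OwlViTFeatureExtractor()   # emits a FutureWarning
#     >>> processor = OwlViTImageProcessor()     # preferred, same behavior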
| 707
|
import pytest
import datasets
# Import fixture modules as plugins
_UpperCAmelCase : Tuple = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if they are not marked as "integration" or "unit"
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 288
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (old_key, new_key) pairs for converting a timm ViT checkpoint."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
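# Illustrative sketch (not part of the original file): each entry produced above
# maps a timm parameter name to its Hugging Face ViT counterpart, e.g.
#
#     ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
#     ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight")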
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
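# Illustrative note (not part of the original file): timm stores Q, K and V as one
# fused (3 * hidden_size, hidden_size) projection; the slices above split it back:
#
#     rows [0, hidden_size)                    -> query
#     rows [hidden_size, 2 * hidden_size)      -> key
#     rows [2 * hidden_size, 3 * hidden_size)  -> value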
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # a standard COCO image, used to verify the conversion on real pixel values
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into the Hugging Face ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 448
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all string rotations of `s`, e.g. "abc" -> ["abc", "bca", "cab"]."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """
    Return the Burrows-Wheeler transform of `s` together with the index of the
    original string among the sorted rotations (needed to invert the transform).
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
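# Worked example (not part of the original file), computed by hand from the
# definitions above: the sorted rotations of "banana" end in "nnbaaa", and
# "banana" itself is the 4th sorted rotation (index 3):
#
#     >>> bwt_transform("banana")
#     {'bwt_string': 'nnbaaa', 'idx_original_string': 3}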
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """
    Reverse the Burrows-Wheeler transform: rebuild the sorted rotations column by
    column, then return the rotation at `idx_original_string`.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
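# Round-trip check (not part of the original file), continuing the example above:
#
#     >>> reverse_bwt("nnbaaa", 3)
#     'banana'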
if __name__ == "__main__":
__UpperCamelCase : Any = """Provide a string that I will generate its BWT transform: """
__UpperCamelCase : Dict = input(entry_msg).strip()
__UpperCamelCase : Optional[int] = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
| 448
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element of [a, b]; lazy propagation keeps each update O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return max(array[a..b]) in O(log n), applying pending lazy updates on the way down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
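# Usage note (not part of the original file): the tree is queried over a 1-indexed
# range [1, size], so for a 3-element array:
#
#     >>> segt = SegmentTree(3)
#     >>> segt.build(1, 1, 3, [4, 1, 9])
#     >>> segt.query(1, 1, 3, 1, 2)  # max of the first two elements
#     4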
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 702
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    # the "image_codebook." prefix below reconstructs the codebook key handling
    # that was elided in this copy of the file
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
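# Illustrative sketch (not part of the original file): the chained replacements map
# original FLAVA checkpoint keys onto HF module paths, e.g.
#
#     "image_encoder.module.blocks.0.attn.qkv.weight"
#         -> "flava.image_model.blocks.0.attn.qkv.weight"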
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 159
| 0
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
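# A minimal invocation sketch (the file names below are hypothetical):
#   python run_language_modeling.py --model_name_or_path gpt2 \
#       --train_data_file ./train.txt --do_train --output_dir ./lm-out
# BERT/RoBERTa-style checkpoints additionally require --mlm, and
# --whole_word_mask can be combined with --mlm for whole-word masking.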
| 401
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 401
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Union[str, Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
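# Sketch of the lazy-import effect: importing this package only registers the
# names listed in _import_structure; _LazyModule resolves an attribute such as
# Swinv2Model to the real submodule on first access, so torch is only needed
# once a torch-backed class is actually used.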
| 299
|
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase : List[str] = len(_UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = max(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = min(_UpperCamelCase )
# create the counting array
__UpperCAmelCase : List[str] = coll_max + 1 - coll_min
__UpperCAmelCase : int = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , _UpperCamelCase ):
__UpperCAmelCase : Tuple = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase : int = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , _UpperCamelCase ) ):
__UpperCAmelCase : str = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return "".join([chr(_UpperCamelCase ) for i in counting_sort([ord(_UpperCamelCase ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
UpperCAmelCase : List[str] = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : Any = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
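# A quick worked trace of the stable placement loop: for [4, 1, 3, 1] the
# counts over the value range 1..4 are [2, 0, 1, 1] and the prefix sums are
# [2, 2, 3, 4]. Walking the input in reverse, the trailing 1 lands at index 1,
# the 3 at index 2, the 4 at index 3, and the leading 1 at index 0 - equal
# keys keep their original relative order, which is what makes the sort stable.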
| 299
| 1
|
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 26
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
lowercase_ : Any = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = 3
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
class _lowerCamelCase:
lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
lowercase_ : str = ["""dtype"""]
lowercase_ : Dict = []
lowercase_ : int = True
@classmethod
def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
_lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
_lowercase : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
"""simple docstring"""
self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase ( cls) -> Any:
"""simple docstring"""
_lowercase : Any = list(set([cls.__name__] + cls._compatibles))
_lowercase : Dict = importlib.import_module(__name__.split('.')[0])
_lowercase : Any = [
getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
]
return compatible_classes
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> jnp.ndarray:
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class _lowerCamelCase:
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : int = scheduler.config
if config.trained_betas is not None:
_lowercase : str = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
_lowercase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Dict = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Optional[int] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
_lowercase : List[str] = 1.0 - betas
_lowercase : Union[str, Any] = jnp.cumprod(lowerCamelCase, axis=0)
return cls(
alphas=lowerCamelCase, betas=lowerCamelCase, alphas_cumprod=lowerCamelCase, )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : str = state.alphas_cumprod
_lowercase : str = alphas_cumprod[timesteps] ** 0.5
_lowercase : Optional[Any] = sqrt_alpha_prod.flatten()
_lowercase : Tuple = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
_lowercase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowercase : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
_lowercase : int = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase , _lowercase : Optional[int] = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase , _lowercase : Tuple = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
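# The closed-form identities implemented above, for reference (x_0 is the clean
# sample, eps the Gaussian noise, and a_t = alphas_cumprod[t]):
#   add_noise_common:    x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps
#   get_velocity_common: v_t = sqrt(a_t) * eps - sqrt(1 - a_t) * x_0
# broadcast_to_shape_from_left right-pads the per-timestep scalars with length-1
# axes so they broadcast against batched sample tensors.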
| 89
| 0
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
A_ : Optional[int] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
A_ : int = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
A_ : str = "|".join(sys.argv[1:])
A_ : Optional[int] = re.compile(rf"^({joined_dirs}).*?\.py$")
A_ : Dict = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 696
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # The default common tokenizer tests assume that every token is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the test suite too slow.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
| 696
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_wrong_framework(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 646
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        decoder_n_points=4,
        encoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
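# A minimal usage sketch: a two-stage config must also enable box refinement,
# otherwise __init__ raises a ValueError.
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.num_attention_heads == config.encoder_attention_heads
#   assert config.hidden_size == config.d_model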
| 646
| 1
|
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
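# A minimal usage sketch: spreading 12 layers across 4 hypothetical devices.
#   device_map = get_device_map(12, list(range(4)))
#   # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
#   assert_device_map(device_map, 12)  # passes: every block appears exactly once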
| 702
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 131
| 0
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = "dummy_data"
__magic_name__ = "datasets"
__magic_name__ = False
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = False , snake_case__ = True , snake_case__ = None , ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = dataset_name
_lowerCAmelCase : Any = cache_dir
_lowerCAmelCase : str = use_local_dummy_data
_lowerCAmelCase : Dict = config
# download_callbacks take a single url as input
_lowerCAmelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCAmelCase : Tuple = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCAmelCase : Optional[int] = str(_A )
# to be downloaded
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Union[str, Any] = None
@property
def a ( self ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCAmelCase : Any = self.download_dummy_data()
return self._dummy_file
@property
def a ( self ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a ( self ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCAmelCase : Any = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def a ( self ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a ( self ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCAmelCase : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a ( self ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a ( self , snake_case__ , *snake_case__ ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCAmelCase : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCAmelCase : int = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def a ( self , snake_case__ , *snake_case__ ):
'''simple docstring'''
return self.download_and_extract(_A )
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
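
    # Illustration (added; not part of the original class): the naming rule used above
    # maps every URL onto a quoted file name inside the dummy-data folder. A quick
    # standalone check of the rule (the URL and folder name here are illustrative):
    #
    #     >>> import os, urllib.parse
    #     >>> url = "https://example.com/data.txt?split=train"
    #     >>> os.path.join("dummy_data", urllib.parse.quote_plus(url.split("/")[-1]))
    #     'dummy_data/data.txt%3Fsplit%3Dtrain'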
| 444
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
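
# Usage sketch (added): the subclass only adds a deprecation warning and otherwise
# behaves exactly like PerceiverImageProcessor.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         extractor = PerceiverFeatureExtractor()  # default construction assumed valid
#     assert any(issubclass(w.category, FutureWarning) for w in caught)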
| 485
| 0
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Elementwise ELU: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
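
# Quick check (added): negative inputs are squashed toward -alpha, positives pass through.
#
#     >>> exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
#     array([-0.63212056,  0.        ,  2.        ])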
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
from ..utils import DummyObject, requires_backends
# NOTE: the anonymization in this dump collapsed every class of the original flax
# dummy-objects module onto the single placeholder name below (the real public class
# names are not recoverable), so the later identical redefinitions are no-ops and one
# corrected instance of the template is kept.
class _UpperCamelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
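
# Behavior note (added): instantiating the placeholder above, or calling any of its
# classmethods, goes through requires_backends and raises an ImportError explaining
# that the flax backend must be installed.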
| 364
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
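
# Note (added): with this lazy-module pattern, importing the package is cheap; the
# torch/TF/flax submodules are only imported the first time one of the names listed
# in _import_structure is actually accessed, because _LazyModule replaces this module
# object in sys.modules.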
| 50
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))
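
# Quick check (added): sigmoid(0) is exactly 0.5 and outputs stay in (0, 1).
#
#     >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
#     array([0.26894142, 0.5       , 0.73105858])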
if __name__ == "__main__":
import doctest
doctest.testmod()
| 385
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2 * square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that is not the sum of a prime and twice a square."""
    return compute_nums(1)[0]
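
# Worked check (added): small odd composites all split as prime + 2 * square, e.g.
# 9 = 7 + 2*1**2, 15 = 7 + 2*2**2, 21 = 19 + 2*1**2, 33 = 31 + 2*1**2, so the search
# runs until 5777, the smallest counterexample to Goldbach's other conjecture --
# i.e. solution() == 5777.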
if __name__ == "__main__":
print(f'{solution() = }')
| 244
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: as with the flax dummies above, the anonymized dump collapsed every class of
# this torch dummy-objects module onto one placeholder name, so the later identical
# redefinitions are no-ops and one corrected instance of the template is kept.
class lowercase__(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# The original module also defines several module-level stubs with the same guard;
# the dump anonymized them all to one name as well, so one corrected instance is kept.
def _snake_case(*args, **kwargs):
    requires_backends(_snake_case, ["torch"])
| 244
| 1
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Depth-first over a binary tree: at each index, first exclude then include the element."""
    if index == len(sequence):
        print(current_subsequence)
        return

    # branch 1: subsequences without sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: subsequences with sequence[index]; backtrack afterwards
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
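
# Note (added): the recursion tree has 2**len(sequence) leaves, one per subsequence, so
# for [3, 1, 2, 4] the driver below prints all 16 subsets, from [] up to [3, 1, 2, 4].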
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 547
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected graph whose edges carry transition probabilities."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
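
# Usage sketch (added; example values are illustrative, not from the original file):
#
#     transitions = [("a", "b", 0.9), ("a", "a", 0.1), ("b", "c", 1.0), ("c", "c", 1.0)]
#     counts = get_transitions("a", transitions, 1000)
#     # "c" is absorbing, so almost every step of a long run lands there:
#     assert counts["c"] > counts["b"]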
if __name__ == "__main__":
import doctest
doctest.testmod()
| 547
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 709
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
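
# Worked example (added): with this indexing, F(1) = F(2) = 1 and the first 3-digit
# term is F(12) = 144, so solution(3) returns 12; solution(1000) gives the index of
# the first 1000-digit Fibonacci number (Project Euler 25).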
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 99
| 0
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def __A () ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCAmelCase__ :Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase__ :List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowerCAmelCase__ :List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase__ :str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
lowerCAmelCase__ :Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
lowerCAmelCase__ :int = AutoModelWithLMHead.from_config(_SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
lowerCAmelCase__ :Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCAmelCase__ :Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowerCAmelCase__ :List[str] = (
get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCAmelCase__ :Optional[int] = (
get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , evaluate=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCAmelCase__ :str = DataCollatorForPermutationLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCAmelCase__ :Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
lowerCAmelCase__ :str = DataCollatorForLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase__ :Tuple = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase__ :Tuple = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_SCREAMING_SNAKE_CASE )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase__ :Optional[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ :Any = trainer.evaluate()
lowerCAmelCase__ :Optional[Any] = math.exp(eval_output['eval_loss'] )
lowerCAmelCase__ :Dict = {'perplexity': perplexity}
lowerCAmelCase__ :List[Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(_SCREAMING_SNAKE_CASE )
return results
def _mp_fn(index ) -> None:
    """simple docstring"""
    # For xla_spawn (TPUs): each spawned process calls back into main(); the index is unused.
    main()
if __name__ == "__main__":
main()
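# A minimal sketch (an illustration, not part of the training script above) of
# how the reported perplexity is derived: it is simply exp() of the mean
# cross-entropy loss that trainer.evaluate() returns under 'eval_loss'.
import math

def perplexity_from_loss(eval_loss: float) -> float:
    # Perplexity is the exponential of the average per-token cross-entropy.
    return math.exp(eval_loss)

assert perplexity_from_loss(0.0) == 1.0  # zero loss -> perplexity of 1
assert round(perplexity_from_loss(math.log(10.0)), 6) == 10.0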
| 93
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
__lowerCamelCase = ""
__lowerCamelCase = ""
__lowerCamelCase = ""
__lowerCamelCase = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    print('''Processing...''' )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cv2.imwrite(f'''{file_root}.jpg''' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Success {index+1}/{len(new_images )} with {file_name}''' )
        annos_list = []
        for anno in new_annos[index]:
            obj = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj )
        with open(f'''{file_root}.txt''' , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir: str , img_dir: str ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( img_list: list , anno_list: list , flip_type: int = 1 ) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars( number_char: int = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 490
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowercase ( unittest.TestCase ):
    def _UpperCamelCase ( self : int ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : Dict , **a : Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
def _UpperCamelCase ( self : Union[str, Any] , **a : Dict ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _UpperCamelCase ( self : List[Any] , **a : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).qformer_tokenizer
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : Tuple =InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__snake_case : Union[str, Any] =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__snake_case : Tuple =self.get_image_processor(do_normalize=a , padding_value=1.0 )
__snake_case : List[Any] =InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
self.assertIsInstance(processor.qformer_tokenizer , a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : str =self.get_image_processor()
__snake_case : Tuple =self.get_tokenizer()
__snake_case : Dict =self.get_qformer_tokenizer()
__snake_case : Optional[int] =InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
__snake_case : Optional[Any] =self.prepare_image_inputs()
__snake_case : List[str] =image_processor(a , return_tensors='''np''' )
__snake_case : List[Any] =processor(images=a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : Any =self.get_image_processor()
__snake_case : List[Any] =self.get_tokenizer()
__snake_case : List[str] =self.get_qformer_tokenizer()
__snake_case : Any =InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
__snake_case : Optional[int] ='''lower newer'''
__snake_case : List[str] =processor(text=a )
__snake_case : List[Any] =tokenizer(a , return_token_type_ids=a )
__snake_case : Dict =qformer_tokenizer(a , return_token_type_ids=a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : int =self.get_image_processor()
__snake_case : Any =self.get_tokenizer()
__snake_case : Tuple =self.get_qformer_tokenizer()
__snake_case : Optional[int] =InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
__snake_case : Dict ='''lower newer'''
__snake_case : Tuple =self.prepare_image_inputs()
__snake_case : int =processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case : Optional[Any] =self.get_image_processor()
__snake_case : Dict =self.get_tokenizer()
__snake_case : List[str] =self.get_qformer_tokenizer()
__snake_case : List[Any] =InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
__snake_case : List[str] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Tuple =processor.batch_decode(a )
__snake_case : Tuple =tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : str =self.get_image_processor()
__snake_case : List[str] =self.get_tokenizer()
__snake_case : Optional[int] =self.get_qformer_tokenizer()
__snake_case : Optional[Any] =InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
__snake_case : List[str] ='''lower newer'''
__snake_case : Any =self.prepare_image_inputs()
__snake_case : List[Any] =processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
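# Sketch (an illustration, not the processor's actual implementation) of the
# key layout the tests above check: the main tokenizer's outputs are kept
# as-is and the Q-Former tokenizer's outputs get a 'qformer_' prefix, so both
# tokenizations can live in a single input dict.
def merge_tokenizer_outputs(text_inputs: dict, qformer_inputs: dict) -> dict:
    merged = dict(text_inputs)
    merged.update({f"qformer_{key}": value for key, value in qformer_inputs.items()})
    return merged

merged = merge_tokenizer_outputs({"input_ids": [1, 2]}, {"input_ids": [3, 4]})
assert sorted(merged) == ["input_ids", "qformer_input_ids"]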
| 497
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase_ : List[str] = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
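# Sketch of the inactivity math above: subtracting two datetimes yields a
# timedelta, and the script compares its .days attribute against 7, 23 and 30.
from datetime import datetime

now = datetime(2023, 1, 31)
last_update = datetime(2023, 1, 1)
assert (now - last_update).days == 30  # old enough for the 30-day threshold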
| 497
| 1
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def _snake_case ( files: list , tmp_path_factory ) -> None:
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def _snake_case ( tmp_path , dataset_info: DatasetInfo ) -> None:
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def _snake_case ( ) -> Dict:
'''simple docstring'''
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def _snake_case ( ) -> str:
'''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def _snake_case ( tmp_path , dataset_infos_dict: DatasetInfosDict ) -> None:
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_snake_case , 'README.md' ) )
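# Sketch of the YAML round-trip property the tests above rely on: safe_dump
# followed by safe_load must reproduce the original mapping exactly.
import yaml as _yaml

info_dict = {"dataset_size": 42, "splits": [{"name": "train"}]}
assert _yaml.safe_load(_yaml.safe_dump(info_dict)) == info_dict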
| 7
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name , num_meta4D_last_stage ) -> str:
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , _ = old_name.split('.' )
        if layer == "0":
            new_name = old_name.replace('0' , 'convolution1' )
        elif layer == "1":
            new_name = old_name.replace('1' , 'batchnorm_before' )
        elif layer == "3":
            new_name = old_name.replace('3' , 'convolution2' )
        else:
            new_name = old_name.replace('4' , 'batchnorm_after' )
    if "network" in old_name and re.search(R'\d\.\d' , old_name ):
        two_digit_num = R'\b\d{2}\b'
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R'\d\.\d\d.' , old_name ).group()
        else:
            match = re.search(R'\d\.\d.' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '' )
            trimmed_name = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1' , 'layernorm1' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2' , 'layernorm2' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1' , 'linear_in' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2' , 'linear_out' )
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(R'.\d.' , old_name ):
        new_name = old_name.replace('network' , 'intermediate_stages' )
    if "fc" in new_name:
        new_name = new_name.replace('fc' , 'convolution' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1' , 'batchnorm_before' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2' , 'batchnorm_after' )
    if "proj" in new_name:
        new_name = new_name.replace('proj' , 'projection' )
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head' , 'distillation_classifier' )
    elif "head" in new_name:
        new_name = new_name.replace('head' , 'classifier' )
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm' , 'layernorm' )
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
def convert_torch_checkpoint( checkpoint , num_meta4D_last_stage ):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img() -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ) -> None:
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 2_56
    crop_size = 2_24
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings['bicubic'] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 10_00)
if "l1" in model_name:
__SCREAMING_SNAKE_CASE = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , a__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__SCREAMING_SNAKE_CASE = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , a__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__SCREAMING_SNAKE_CASE = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(f"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print('Pushing model to the hub...' )
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCAmelCase__ : Union[str, Any] =parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
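# Minimal sketch of the checkpoint-renaming pattern used above: every key is
# popped, rewritten through a rename function, and reinserted; tensor values
# are left untouched. (rename_keys here is an illustration, not the real helper.)
def rename_keys(state_dict: dict, rename) -> dict:
    return {rename(key): value for key, value in state_dict.items()}

renamed = rename_keys({"norm1.weight": 1}, lambda k: k.replace("norm1", "layernorm1"))
assert renamed == {"layernorm1.weight": 1}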
| 148
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
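# Sketch of the idea behind _LazyModule above (an illustration, not its real
# implementation): heavy submodules are resolved on first attribute access
# instead of at package import time.
import importlib

def lazy_getattr(module_name: str, attribute: str):
    # Import the module, and thus pay its cost, only when the attribute is requested.
    return getattr(importlib.import_module(module_name), attribute)

assert lazy_getattr("math", "pi") > 3.14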
| 430
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
__a = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__a = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowercase ( self : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ):
_snake_case = AudioClassificationPipeline(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
# test with a raw waveform
_snake_case = np.zeros((34000,) )
_snake_case = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def lowercase ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ):
_snake_case , _snake_case = examples
_snake_case = audio_classifier(_lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowerCamelCase , [
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
] , )
_snake_case = audio_classifier(_lowerCamelCase , top_k=1 )
self.assertEqual(
_lowerCamelCase , [
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
] , )
self.run_torchaudio(_lowerCamelCase )
@require_torchaudio
def lowercase ( self : Optional[int] , _lowerCamelCase : str ):
import datasets
# test with a local file
_snake_case = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
_snake_case = dataset[0]['''audio''']['''array''']
_snake_case = audio_classifier(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
{'''score''': ANY(_lowerCamelCase ), '''label''': ANY(_lowerCamelCase )},
] , )
@require_torch
def lowercase ( self : Union[str, Any] ):
_snake_case = '''anton-l/wav2vec2-random-tiny-classifier'''
_snake_case = pipeline('''audio-classification''' , model=_lowerCamelCase )
_snake_case = np.ones((8000,) )
_snake_case = audio_classifier(_lowerCamelCase , top_k=4 )
_snake_case = [
{'''score''': 0.0_8_4_2, '''label''': '''no'''},
{'''score''': 0.0_8_3_8, '''label''': '''up'''},
{'''score''': 0.0_8_3_7, '''label''': '''go'''},
{'''score''': 0.0_8_3_4, '''label''': '''right'''},
]
_snake_case = [
{'''score''': 0.0_8_4_5, '''label''': '''stop'''},
{'''score''': 0.0_8_4_4, '''label''': '''on'''},
{'''score''': 0.0_8_4_1, '''label''': '''right'''},
{'''score''': 0.0_8_3_4, '''label''': '''left'''},
]
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_snake_case = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
_snake_case = audio_classifier(_lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase ( self : Optional[int] ):
import datasets
_snake_case = '''superb/wav2vec2-base-superb-ks'''
_snake_case = pipeline('''audio-classification''' , model=_lowerCamelCase )
_snake_case = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
        _snake_case = np.array(dataset[3]['''speech'''] , dtype=np.float32 )
_snake_case = audio_classifier(_lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=3 ) , [
{'''score''': 0.9_8_1, '''label''': '''go'''},
{'''score''': 0.0_0_7, '''label''': '''up'''},
{'''score''': 0.0_0_6, '''label''': '''_unknown_'''},
{'''score''': 0.0_0_1, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase ( self : Optional[int] ):
pass
| 430
| 1
|
"""simple docstring"""
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool ( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ['text']
    outputs = ['audio']
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 695
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester (unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=4 , ) -> List[Any]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_choices
def _a ( self ) -> str:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_attention_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = True
lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest (FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self )
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
        model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 588
| 0
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) contains no other equal value, because if it does, it
        # means there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ) -> None:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
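# Worked example of the diagonal checks above for a queen placed at
# (row=1, col=3): every square on its 45º diagonal shares the same row - col,
# and every square on its 135º diagonal shares the same row + col.
assert 1 - 3 == 2 - 4  # (1, 3) and (2, 4) collide on the right diagonal
assert 1 + 3 == 2 + 2  # (1, 3) and (2, 2) collide on the left diagonal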
| 703
|
'''simple docstring'''
import argparse
import os
import re
_lowercase : str ="src/transformers"
# Pattern that looks at the indentation in a line.
_lowercase : List[Any] =re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : Optional[Any] =re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : Tuple =re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : List[Any] =re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : int =re.compile(R"\[([^\]]+)\]")
def get_indent( line: str ) -> str:
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def __UpperCAmelCase ( UpperCamelCase__ :int , UpperCamelCase__ :int="" , UpperCamelCase__ :Optional[int]=None , UpperCamelCase__ :str=None ) -> int:
snake_case__ : Union[str, Any] = 0
snake_case__ : int = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(UpperCamelCase__ ):
index += 1
snake_case__ : Dict = ['''\n'''.join(lines[:index] )]
else:
snake_case__ : Union[str, Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : int = [lines[index]]
index += 1
while index < len(UpperCamelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCamelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(UpperCamelCase__ ) )
if index < len(UpperCamelCase__ ) - 1:
snake_case__ : Any = [lines[index + 1]]
index += 1
else:
snake_case__ : Any = []
else:
blocks.append('''\n'''.join(UpperCamelCase__ ) )
snake_case__ : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCamelCase__ ) > 0:
blocks.append('''\n'''.join(UpperCamelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCamelCase__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore( key ):
    def _inner( x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects( objects , key=None ):
    # If no key is provided, we use a noop.
    def noop( x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def __UpperCAmelCase ( UpperCamelCase__ :List[Any] ) -> List[Any]:
# This inner function sort imports between [ ].
def _replace(UpperCamelCase__ :Union[str, Any] ):
snake_case__ : Union[str, Any] = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
snake_case__ : Dict = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case__ : Tuple = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCamelCase__ )] ) + "]"
snake_case__ : Optional[int] = import_statement.split('''\n''' )
if len(UpperCamelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
snake_case__ : Optional[Any] = 2 if lines[1].strip() == '''[''' else 1
snake_case__ : Union[str, Any] = [(i, _re_strip_line.search(UpperCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
snake_case__ : Dict = sort_objects(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] )
snake_case__ : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCamelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
snake_case__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
snake_case__ : Any = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case__ : Dict = keys[:-1]
snake_case__ : Union[str, Any] = get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(UpperCamelCase__ )] )
return "\n".join(UpperCamelCase__ )
else:
# Finally we have to deal with imports fitting on one line
snake_case__ : Dict = _re_bracket_content.sub(_replace , UpperCamelCase__ )
return import_statement
def __UpperCAmelCase ( UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Dict=True ) -> Dict:
with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
snake_case__ : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
snake_case__ : Optional[Any] = split_code_in_indented_blocks(
UpperCamelCase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCamelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
snake_case__ : Any = main_blocks[block_idx]
snake_case__ : Dict = block.split('''\n''' )
# Get to the start of the imports.
snake_case__ : List[str] = 0
while line_idx < len(UpperCamelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
snake_case__ : List[Any] = len(UpperCamelCase__ )
else:
line_idx += 1
if line_idx >= len(UpperCamelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
snake_case__ : Optional[Any] = '''\n'''.join(block_lines[line_idx:-1] )
snake_case__ : Dict = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
snake_case__ : List[str] = split_code_in_indented_blocks(UpperCamelCase__ , indent_level=UpperCamelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
snake_case__ : Tuple = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
snake_case__ : Optional[int] = [(pattern.search(UpperCamelCase__ ).groups()[0] if pattern.search(UpperCamelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
snake_case__ : Any = [(i, key) for i, key in enumerate(UpperCamelCase__ ) if key is not None]
snake_case__ : Optional[int] = [x[0] for x in sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
snake_case__ : Dict = 0
snake_case__ : Dict = []
for i in range(len(UpperCamelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
snake_case__ : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(UpperCamelCase__ )
count += 1
# And we put our main block back together with its first and last line.
snake_case__ : int = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCamelCase__ ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(UpperCamelCase__ ) )
def __UpperCAmelCase ( UpperCamelCase__ :Optional[Any]=True ) -> Union[str, Any]:
snake_case__ : str = []
for root, _, files in os.walk(UpperCamelCase__ ):
if "__init__.py" in files:
snake_case__ : int = sort_imports(os.path.join(UpperCamelCase__ , '''__init__.py''' ) , check_only=UpperCamelCase__ )
if result:
snake_case__ : Optional[Any] = [os.path.join(UpperCamelCase__ , '''__init__.py''' )]
if len(UpperCamelCase__ ) > 0:
raise ValueError(F'''Would overwrite {len(UpperCamelCase__ )} files, run `make style`.''' )
if __name__ == "__main__":
_lowercase : Optional[Any] =argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_lowercase : Tuple =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
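# Sketch of the sort key built by ignore_underscore above: objects compare
# case-insensitively with underscores stripped, so '_b' sorts between 'a' and 'C'.
sort_key = lambda name: name.lower().replace("_", "")
assert sorted(["_b", "a", "C"], key=sort_key) == ["a", "_b", "C"]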
| 574
| 0
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    @property
    def dummy_uncond_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        '''simple docstring'''
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 454
|
import logging
from transformers import PretrainedConfig
_lowerCAmelCase : str = logging.getLogger(__name__)
_lowerCAmelCase : Dict = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
    model_type = '''bertabs'''
    def __init__( self , vocab_size=30_522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 454
| 1
|
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase_ : Dict = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
lowerCAmelCase_ : str = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
lowerCAmelCase_ : Dict = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
lowerCAmelCase_ : Union[str, Any] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
lowerCAmelCase_ : Tuple = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key( k: str , patterns ) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus( tf_weights: dict , config_update: dict ) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
    for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight" )
    missing , extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], F'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy( path ) -> dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars , desc="converting tf checkpoint to dict" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path: str , save_dir: str , config_update: dict ) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
lowerCAmelCase_ : Tuple = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
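# Worked example of rename_state_dict_key above: each (tf, hf) substring
# replacement is applied in order to a single key. The pattern list below is a
# small subset of DECODER_PATTERNS chosen purely for illustration.
example_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
example_key = "pegasus/decoder/layer_0/kernel"
for tf_name, hf_name in example_patterns:
    example_key = example_key.replace(tf_name, hf_name)
assert example_key == "pegasus.decoder.layers.0.weight"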
| 521
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 521
| 1
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f"Loading PyTorch weights from {pt_path}" )
        pt_state_dict = torch.load(pt_path , map_location='''cpu''' )
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    def is_key_or_prefix_key_in_dict( key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '''_v'''
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def __a ( __UpperCAmelCase , __UpperCAmelCase ):
# convert pytorch tensor to numpy
a__ = {k: v.numpy() for k, v in pt_state_dict.items()}
a__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
a__ = flax_model.params['''params''']
else:
a__ = flax_model.params
a__ = flatten_dict(__UpperCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
a__ = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__UpperCAmelCase )
a__ = {}
a__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
a__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
a__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ = pt_tuple_key[1:]
# Correctly rename weight parameters
a__ , a__ = rename_key_and_reshape_tensor(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# add model prefix if necessary
a__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
a__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
a__ = jnp.asarray(__UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
a__ = jnp.asarray(__UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
a__ = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
def __a ( __UpperCAmelCase , __UpperCAmelCase ):
import torch
# Load the index
a__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
a__ = torch.load(__UpperCAmelCase )
a__ = {k: v.numpy() for k, v in pt_state_dict.items()}
a__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
a__ = flax_model.params['''params''']
a__ = flatten_dict(__UpperCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
a__ = flax_model.params
a__ = flatten_dict(__UpperCAmelCase )
a__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
a__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
a__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ = pt_tuple_key[1:]
# Correctly rename weight parameters
a__ , a__ = rename_key_and_reshape_tensor(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# add model prefix if necessary
a__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
a__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
a__ = jnp.asarray(__UpperCAmelCase )
continue
if "var" in flax_key[-1]:
a__ = jnp.asarray(__UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
a__ = jnp.asarray(__UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
a__ = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
def __a ( __UpperCAmelCase , __UpperCAmelCase ):
a__ = os.path.abspath(__UpperCAmelCase )
logger.info(f"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
a__ = getattr(__UpperCAmelCase , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__UpperCAmelCase , '''rb''' ) as state_f:
try:
a__ = from_bytes(__UpperCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(__UpperCAmelCase , __UpperCAmelCase )
def __a ( __UpperCAmelCase , __UpperCAmelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
a__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PyTorch yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
a__ = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , __UpperCAmelCase )
a__ = flatten_dict(__UpperCAmelCase )
a__ = pt_model.state_dict()
a__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
a__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
a__ = []
a__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
a__ = flax_key_tuple[0] == pt_model.base_model_prefix
a__ = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
a__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
a__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__UpperCAmelCase ) not in pt_model_dict:
# conv layer
a__ = flax_key_tuple[:-1] + ('''weight''',)
a__ = jnp.transpose(__UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCAmelCase ) not in pt_model_dict:
# linear layer
a__ = flax_key_tuple[:-1] + ('''weight''',)
a__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a__ = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
a__ = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
a__ = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
a__ = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
a__ = '''.'''.join(__UpperCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
a__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
a__ = key.split('''.''' )
a__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
a__ = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
a__ = key_components[-2] + '''_v'''
if name is not None:
a__ = key_components[:-3] + [name]
a__ = '''.'''.join(__UpperCAmelCase )
a__ = key
if flax_key in special_pt_names:
a__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
a__ = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase , np.ndarray ) else flax_tensor
a__ = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
a__ = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(__UpperCAmelCase ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
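# A minimal usage sketch (model name assumed, not from the source). These
# helpers back transformers' `from_pt=True` path, which converts a PyTorch
# state dict into Flax parameters (key renaming, kernel transposition,
# batch-norm stat handling) as implemented above.
if __name__ == "__main__":
    from transformers import FlaxBertModel

    flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)
    print(list(flax_model.params.keys()))  # top-level Flax parameter groups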
| 194
|
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
a__ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
a__ = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE )
a__ = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE )
if self.isEnabledFor(SCREAMING_SNAKE_CASE ):
if self._should_log(SCREAMING_SNAKE_CASE ):
a__ , a__ = self.process(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.logger.log(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif in_order:
a__ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
a__ , a__ = self.process(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.logger.log(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
state.wait_for_everyone()
def __a ( __UpperCAmelCase , __UpperCAmelCase = None ):
if log_level is None:
a__ = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __UpperCAmelCase )
a__ = logging.getLogger(__UpperCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__UpperCAmelCase , {} )
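# A minimal usage sketch (not from the source). `accelerate.logging.get_logger`
# is the public counterpart of the factory above; an Accelerator (or
# PartialState) must be constructed first, otherwise `log` raises the
# RuntimeError shown above.
if __name__ == "__main__":
    from accelerate import Accelerator
    from accelerate.logging import get_logger

    accelerator = Accelerator()  # initializes PartialState
    logger = get_logger(__name__, log_level="INFO")
    logger.info("emitted once, from the main process")  # main_process_only defaults to True
    logger.info("emitted by every process", main_process_only=False)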
| 194
| 1
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=24 , lowerCamelCase=2 , lowerCamelCase=6 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=10_00 , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : List[str] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : List[Any] = seq_length
UpperCamelCase : List[Any] = is_training
UpperCamelCase : Optional[Any] = use_input_mask
UpperCamelCase : Union[str, Any] = use_token_type_ids
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Optional[int] = vocab_size
UpperCamelCase : Any = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[str] = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : Optional[int] = type_vocab_size
UpperCamelCase : int = type_sequence_label_size
UpperCamelCase : str = initializer_range
UpperCamelCase : Union[str, Any] = num_labels
UpperCamelCase : str = scope
UpperCamelCase : Dict = range_bbox
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase : List[Any] = bbox[i, j, 3]
UpperCamelCase : Optional[Any] = bbox[i, j, 1]
UpperCamelCase : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase : Tuple = bbox[i, j, 2]
UpperCamelCase : List[str] = bbox[i, j, 0]
UpperCamelCase : List[str] = t
UpperCamelCase : Optional[Any] = None
if self.use_input_mask:
UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase : Tuple = None
if self.use_token_type_ids:
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Tuple = None
UpperCamelCase : Union[str, Any] = None
if self.use_labels:
UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> int:
'''simple docstring'''
UpperCamelCase : List[str] = LiltModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : List[Any] = model(lowerCamelCase , bbox=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )
UpperCamelCase : Any = model(lowerCamelCase , bbox=lowerCamelCase , token_type_ids=lowerCamelCase )
UpperCamelCase : Dict = model(lowerCamelCase , bbox=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[str]:
'''simple docstring'''
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Any = LiltForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : Tuple = model(
lowerCamelCase , bbox=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Tuple = LiltForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : Any = model(
lowerCamelCase , bbox=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) = config_and_inputs
UpperCamelCase : List[str] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : int = LiltModelTester(self )
UpperCamelCase : Optional[int] = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase : str = type
self.model_tester.create_and_check_model(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : int = LiltModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(lowerCamelCase )
UpperCamelCase : Dict = torch.tensor([[1, 2]] , device=lowerCamelCase )
UpperCamelCase : Dict = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase : List[str] = model(input_ids=lowerCamelCase , bbox=lowerCamelCase )
UpperCamelCase : Dict = torch.Size([1, 2, 7_68] )
UpperCamelCase : Tuple = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowerCamelCase , )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase , atol=1e-3 ) )
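# A standalone sketch mirroring the integration test above (checkpoint name and
# inputs taken from the test).
if __name__ == "__main__":
    import torch
    from transformers import LiltModel

    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    with torch.no_grad():
        out = model(input_ids=torch.tensor([[1, 2]]), bbox=torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]]))
    print(out.last_hidden_state.shape)  # expected: torch.Size([1, 2, 768])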
| 435
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''timesformer'''
def __init__( self , lowerCamelCase=2_24 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=8 , lowerCamelCase=7_68 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=30_72 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1e-6 , lowerCamelCase=True , lowerCamelCase="divided_space_time" , lowerCamelCase=0 , **lowerCamelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase )
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : Dict = num_channels
UpperCamelCase : int = num_frames
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : int = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : List[str] = attention_probs_dropout_prob
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : Any = qkv_bias
UpperCamelCase : int = attention_type
UpperCamelCase : int = drop_path_rate
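# A minimal instantiation sketch (not from the source). The public class this
# definition corresponds to is `transformers.TimesformerConfig`; the values
# below are a subset of the defaults listed above.
if __name__ == "__main__":
    from transformers import TimesformerConfig, TimesformerModel

    config = TimesformerConfig(num_frames=8, attention_type="divided_space_time")
    model = TimesformerModel(config)  # randomly initialized weights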
| 435
| 1
|
from typing import Any
class A__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any ):
a__ : List[str] = data
a__ : List[Any] = None
def __repr__( self : Tuple ):
return f'''Node({self.data})'''
class A__ :
"""simple docstring"""
def __init__( self : Dict ):
a__ : Union[str, Any] = None
def __iter__( self : List[Any] ):
a__ : List[str] = self.head
while node:
yield node.data
a__ : List[Any] = node.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : List[Any] ):
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __getitem__( self : Dict , lowerCamelCase__ : int ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
a__ : int = self.head
for _ in range(lowerCamelCase__ ):
a__ : Union[str, Any] = current.next
a__ : Optional[int] = data
def _UpperCamelCase( self : Any , lowerCamelCase__ : Any ):
self.insert_nth(len(self ) , lowerCamelCase__ )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any ):
self.insert_nth(0 , lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
a__ : Union[str, Any] = Node(lowerCamelCase__ )
if self.head is None:
a__ : List[Any] = new_node
elif index == 0:
a__ : str = self.head # link new_node to head
a__ : int = new_node
else:
a__ : List[Any] = self.head
for _ in range(index - 1 ):
a__ : Optional[int] = temp.next
a__ : Optional[int] = temp.next
a__ : Dict = new_node
def _UpperCamelCase( self : List[Any] ): # print every node data
print(self )
def _UpperCamelCase( self : List[str] ):
return self.delete_nth(0 )
def _UpperCamelCase( self : Dict ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def _UpperCamelCase( self : Any , lowerCamelCase__ : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
a__ : Optional[int] = self.head # default first node
if index == 0:
a__ : Tuple = self.head.next
else:
a__ : List[Any] = self.head
for _ in range(index - 1 ):
a__ : int = temp.next
a__ : Tuple = temp.next
a__ : int = temp.next.next
return delete_node.data
def _UpperCamelCase( self : Optional[int] ):
return self.head is None
def _UpperCamelCase( self : Optional[int] ):
a__ : Any = None
a__ : List[Any] = self.head
while current:
# Store the current node's next node.
a__ : Tuple = current.next
# Make the current node's next point backwards
a__ : int = prev
# Make the previous node be the current node
a__ : Optional[Any] = current
# Make the current node the next node (to progress iteration)
a__ : Tuple = next_node
# Return prev in order to put the head at the end
a__ : Optional[Any] = prev
def UpperCamelCase_ ( ) -> None:
a__ : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(__a ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__a ) == i
linked_list.insert_nth(__a , i + 1 )
assert str(__a ) == "->".join(str(__a ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__a ) == "->".join(str(__a ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__a ) == 9
assert str(__a ) == "->".join(str(__a ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
a__ : int = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__a ) == "->".join(str(__a ) for i in range(-8 , 1 ) )
def UpperCamelCase_ ( ) -> None:
a__ : Union[str, Any] = [
-9,
100,
Node(77_345_112 ),
"dlrow olleH",
7,
5_555,
0,
-192.55555,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
a__ : Any = LinkedList()
for i in test_input:
linked_list.insert_tail(__a )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__a ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
a__ : int = linked_list.delete_head()
assert result == -9
assert (
str(__a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
a__ : List[Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(__a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
a__ : List[str] = linked_list.delete_nth(10 )
assert result is None
assert (
str(__a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__a )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__a )
assert (
str(__a )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__a )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def UpperCamelCase_ ( ) -> Tuple:
from doctest import testmod
testmod()
a__ : Any = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__a )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
a__ : Optional[int] = input("Enter New Value: " ).strip()
print("New list:" )
print(__a )
print(f'''length of linked_list is : {len(__a )}''' )
if __name__ == "__main__":
main()
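# A compact sketch of the same API the tests above exercise (class and method
# names as used at the test call sites):
#
#   ll = LinkedList()
#   for value in (3, 1, 2):
#       ll.insert_tail(value)
#   str(ll)            # '3->1->2'
#   ll.reverse()
#   str(ll)            # '2->1->3'
#   ll.delete_head()   # 2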
| 37
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ) ->int:
_UpperCAmelCase =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_UpperCAmelCase =[(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->List[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase =""
else:
_UpperCAmelCase ="deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase =in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase =in_proj_bias[: config.hidden_size]
_UpperCAmelCase =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase =in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase =in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Dict:
_UpperCAmelCase =dct.pop(_lowerCamelCase )
_UpperCAmelCase =val
def lowerCamelCase__ ( ) ->int:
_UpperCAmelCase ="http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase =Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->List[str]:
_UpperCAmelCase =DeiTConfig()
# all deit models have fine-tuned heads
_UpperCAmelCase =False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_UpperCAmelCase =1000
_UpperCAmelCase ="huggingface/label-files"
_UpperCAmelCase ="imagenet-1k-id2label.json"
_UpperCAmelCase =json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase ={int(_lowerCamelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase =idalabel
_UpperCAmelCase ={v: k for k, v in idalabel.items()}
_UpperCAmelCase =int(deit_name[-6:-4] )
_UpperCAmelCase =int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
_UpperCAmelCase =192
_UpperCAmelCase =768
_UpperCAmelCase =12
_UpperCAmelCase =3
elif deit_name[9:].startswith("small" ):
_UpperCAmelCase =384
_UpperCAmelCase =1536
_UpperCAmelCase =12
_UpperCAmelCase =6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
_UpperCAmelCase =1024
_UpperCAmelCase =4096
_UpperCAmelCase =24
_UpperCAmelCase =16
# load original model from timm
_UpperCAmelCase =timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase =timm_model.state_dict()
_UpperCAmelCase =create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
_UpperCAmelCase =DeiTForImageClassificationWithTeacher(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
_UpperCAmelCase =int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_UpperCAmelCase =DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size )
_UpperCAmelCase =image_processor(images=prepare_img() , return_tensors="pt" )
_UpperCAmelCase =encoding["pixel_values"]
_UpperCAmelCase =model(_lowerCamelCase )
_UpperCAmelCase =timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
snake_case__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
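# Hypothetical invocation sketch (script name and output path are placeholders;
# the model name is the default defined above):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-224
#
# The script verifies the converted logits against the original timm model with
# torch.allclose(atol=1e-3) before saving.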
| 408
| 0
|
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
if len(__snake_case ) != len(__snake_case ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# list of profit per 1 kg of each item: compute and append profit/weight
# for every element
lowerCamelCase__ = [p / w for p, w in zip(__snake_case ,__snake_case )]
# Creating a copy of the list and sorting profit/weight in ascending order
lowerCamelCase__ = sorted(__snake_case )
# declaring useful variables
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
# pick the largest remaining profit/weight ratio from sorted_profit_by_weight
lowerCamelCase__ = sorted_profit_by_weight[length - i - 1]
lowerCamelCase__ = profit_by_weight.index(__snake_case )
lowerCamelCase__ = -1
# check whether the remaining capacity can take the whole item
if max_weight - limit >= weight[index]:
limit += weight[index]
# take the whole item: the fraction used is weight[index]/weight[index] == 1,
# so the full profit[index] is gained
gain += 1 * profit[index]
else:
# the item does not fit whole: take only the remaining capacity and add
# the proportional profit
# fraction taken = (max_weight - limit) / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
_a = [int(x) for x in input("Input profits separated by spaces: ").split()]
_a = [int(x) for x in input("Input weights separated by spaces: ").split()]
_a = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
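# Classic worked example (values illustrative, not from the source): the
# profit/weight ratios are 6, 5 and 4, so the greedy fill takes items 1 and 2
# whole plus 20/30 of item 3:
#
#   calc_profit([60, 100, 120], [10, 20, 30], 50)  # -> 240.0  (60 + 100 + 80)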
| 29
|
from math import sqrt
def lowerCAmelCase__(__snake_case ) -> bool:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' must been an int and positive"
lowerCamelCase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowerCamelCase__ = False
for divisor in range(2 ,int(round(sqrt(__snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCamelCase__ = False
break
# precondition
assert isinstance(__snake_case ,__snake_case ), "'status' must been from type bool"
return status
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCamelCase__ = list(range(2 ,n + 1 ) )
lowerCamelCase__ = [] # this list will be returns.
# actual sieve of Eratosthenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 ,len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCamelCase__ = 0
# filters actual prime numbers.
lowerCamelCase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
lowerCamelCase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 ,n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and number >= 0, "'number' must been an int and >= 0"
lowerCamelCase__ = [] # this list will be returns of the function.
# potential prime number factors.
lowerCamelCase__ = 2
lowerCamelCase__ = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ = 0
# prime factorization of 'number'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = max(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int"
return ans
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ = 0
# prime factorization of 'number'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = min(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int"
return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 ,__snake_case ), "compare must been from type bool"
return number % 2 == 0
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 ,__snake_case ), "compare must been from type bool"
return number % 2 != 0
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
lowerCamelCase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCamelCase__ = get_prime_numbers(__snake_case )
lowerCamelCase__ = len(__snake_case )
# run variable for while-loops.
lowerCamelCase__ = 0
lowerCamelCase__ = None
# exit variable. for break up the loops
lowerCamelCase__ = True
while i < len_pn and loop:
lowerCamelCase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCamelCase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ = 0
while numbera != 0:
lowerCamelCase__ = numbera % numbera
lowerCamelCase__ = numbera
lowerCamelCase__ = rest
# precondition
assert isinstance(__snake_case ,__snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ = 1 # actual answer that will be return.
# special case: lcm(x, 1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = max(__snake_case ,__snake_case )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = [] # captures numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case ,__snake_case ) ):
ans *= n
else:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'number' must been a positive int"
lowerCamelCase__ = 0
lowerCamelCase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case ,__snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCamelCase__ = p_number_a + 1 # jump to the next number
lowerCamelCase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 1), "'n' must been int and >= 1"
lowerCamelCase__ = [] # will be returned.
for divisor in range(1 ,n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCamelCase__ = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCamelCase__ = gcd(abs(__snake_case ) ,abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been a int and >= 0"
lowerCamelCase__ = 1 # this will be return.
for factor in range(1 ,n + 1 ):
ans *= factor
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been an int and >= 0"
lowerCamelCase__ = 0
lowerCamelCase__ = 1
lowerCamelCase__ = 1 # this will be return
for _ in range(n - 1 ):
lowerCamelCase__ = ans
ans += fiba
lowerCamelCase__ = tmp
return ans
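# Quick smoke checks (function names as used at the module's internal call
# sites; expected values verified by hand):
if __name__ == "__main__":
    assert is_prime(97) is True
    assert prime_factorization(60) == [2, 2, 3, 5]
    assert gcd(54, 24) == 6
    assert is_even(10) is True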
| 29
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__a = 16
__a = 32
def lowerCamelCase__ ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowercase )
UpperCAmelCase_ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_lowercase )
UpperCAmelCase_ : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_, UpperCAmelCase_ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
UpperCAmelCase_ : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
UpperCAmelCase_ : Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : Optional[Any] = config['''lr''']
UpperCAmelCase_ : Union[str, Any] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[Any] = int(config['''seed'''] )
UpperCAmelCase_ : int = int(config['''batch_size'''] )
UpperCAmelCase_ : List[str] = args.model_name_or_path
set_seed(_lowercase )
UpperCAmelCase_, UpperCAmelCase_ = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
UpperCAmelCase_ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : str = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[Any] = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : List[str] = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
UpperCAmelCase_ : List[str] = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Tuple = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : str = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ : List[str] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : Union[str, Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Dict = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(_lowercase ) + 1
UpperCAmelCase_ : List[str] = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
        accelerator.print('''resumed checkpoint's scheduler's lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer's lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCAmelCase_ : Any = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : Union[str, Any] = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
UpperCAmelCase_ : List[str] = model(**_lowercase )
UpperCAmelCase_ : int = outputs.loss
UpperCAmelCase_ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : str = f'''epoch_{epoch}'''
UpperCAmelCase_ : Optional[Any] = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
UpperCAmelCase_ : Any = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase_ : Union[str, Any] = accuracy
UpperCAmelCase_ : List[str] = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[Any] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Any = epoch
UpperCAmelCase_ : Any = overall_step
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
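# Standalone sketch of the resume-epoch parsing used above (helper name is
# illustrative): the epoch number is read off the checkpoint folder name and
# training resumes at the following epoch.
def _parse_resume_epoch(checkpoint_path: str) -> int:
    digits = ""
    for char in checkpoint_path.split("epoch_")[1]:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1

assert _parse_resume_epoch("./output/epoch_3") == 4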
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ : Dict = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
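# Usage sketch (the script file name and paths are assumptions, not from the original):
#   accelerate launch checkpointing_example.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch checkpointing_example.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0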
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
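# Illustrative examples (not part of the original solution): 192384576 is the
# concatenated product of 192 and (1, 2, 3), so it is 9-pandigital, while a number
# with a repeated digit is not.
#   is_9_pandigital(192_384_576)  # -> True
#   is_9_pandigital(112_345_678)  # -> False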
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class snake_case_ :
"""simple docstring"""
__lowerCAmelCase : str
__lowerCAmelCase : List[str]
__lowerCAmelCase : Optional[List[str]]
@dataclass
class snake_case_ :
"""simple docstring"""
__lowerCAmelCase : List[int]
__lowerCAmelCase : List[int]
__lowerCAmelCase : Optional[List[int]] =None
__lowerCAmelCase : Optional[List[int]] =None
class snake_case_ ( A__ ):
"""simple docstring"""
__lowerCAmelCase : List[str] ='''train'''
__lowerCAmelCase : int ='''dev'''
__lowerCAmelCase : Union[str, Any] ='''test'''
class snake_case_ :
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( UpperCamelCase , UpperCamelCase):
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( UpperCamelCase):
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase="[CLS]" , UpperCamelCase=1 , UpperCamelCase="[SEP]" , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=0 , UpperCamelCase=0 , UpperCamelCase=-1_00 , UpperCamelCase=0 , UpperCamelCase=True , ):
lowerCamelCase__ = {label: i for i, label in enumerate(UpperCamelCase)}
lowerCamelCase__ = []
for ex_index, example in enumerate(UpperCamelCase):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d of %d" , UpperCamelCase , len(UpperCamelCase))
lowerCamelCase__ = []
lowerCamelCase__ = []
for word, label in zip(example.words , example.labels):
lowerCamelCase__ = tokenizer.tokenize(UpperCamelCase)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(UpperCamelCase) > 0:
tokens.extend(UpperCamelCase)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(UpperCamelCase) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
lowerCamelCase__ = tokenizer.num_special_tokens_to_add()
if len(UpperCamelCase) > max_seq_length - special_tokens_count:
lowerCamelCase__ = tokens[: (max_seq_length - special_tokens_count)]
lowerCamelCase__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
lowerCamelCase__ = [sequence_a_segment_id] * len(UpperCamelCase)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
lowerCamelCase__ = [cls_token] + tokens
lowerCamelCase__ = [pad_token_label_id] + label_ids
lowerCamelCase__ = [cls_token_segment_id] + segment_ids
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
lowerCamelCase__ = [1 if mask_padding_with_zero else 0] * len(UpperCamelCase)
# Zero-pad up to the sequence length.
lowerCamelCase__ = max_seq_length - len(UpperCamelCase)
if pad_on_left:
lowerCamelCase__ = ([pad_token] * padding_length) + input_ids
lowerCamelCase__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
lowerCamelCase__ = ([pad_token_segment_id] * padding_length) + segment_ids
lowerCamelCase__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(UpperCamelCase) == max_seq_length
assert len(UpperCamelCase) == max_seq_length
assert len(UpperCamelCase) == max_seq_length
assert len(UpperCamelCase) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" , example.guid)
logger.info("tokens: %s" , " ".join([str(UpperCamelCase) for x in tokens]))
logger.info("input_ids: %s" , " ".join([str(UpperCamelCase) for x in input_ids]))
logger.info("input_mask: %s" , " ".join([str(UpperCamelCase) for x in input_mask]))
logger.info("segment_ids: %s" , " ".join([str(UpperCamelCase) for x in segment_ids]))
logger.info("label_ids: %s" , " ".join([str(UpperCamelCase) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
lowerCamelCase__ = None
features.append(
InputFeatures(
input_ids=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , label_ids=UpperCamelCase))
return features
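# Minimal sketch of the first-subtoken labeling rule used above (helper name is
# illustrative; -100 is the CrossEntropyLoss ignore index used as pad_token_label_id):
def align_word_labels(word_pieces: list, label_id: int, pad_token_label_id: int = -100) -> list:
    return [label_id] + [pad_token_label_id] * (len(word_pieces) - 1)

assert align_word_labels(["hug", "##ging", "##face"], label_id=3) == [3, -100, -100]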
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class snake_case_ ( A__ ):
"""simple docstring"""
__lowerCAmelCase : List[InputFeatures]
__lowerCAmelCase : int =nn.CrossEntropyLoss().ignore_index
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase=False , UpperCamelCase = Split.train , ):
# Load data features from cache or dataset file
lowerCamelCase__ = os.path.join(
UpperCamelCase , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(UpperCamelCase)) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ = cached_features_file + ".lock"
with FileLock(UpperCamelCase):
if os.path.exists(UpperCamelCase) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""")
lowerCamelCase__ = torch.load(UpperCamelCase)
else:
logger.info(f"""Creating features from dataset file at {data_dir}""")
lowerCamelCase__ = token_classification_task.read_examples_from_file(UpperCamelCase , UpperCamelCase)
# TODO clean up all this to leverage built-in features of tokenizers
lowerCamelCase__ = token_classification_task.convert_examples_to_features(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , cls_token_at_end=bool(model_type in ["xlnet"]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCamelCase , pad_on_left=bool(tokenizer.padding_side == "left") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""")
torch.save(self.features , UpperCamelCase)
def __len__( self):
return len(self.features)
def __getitem__( self , UpperCamelCase):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class snake_case_ :
"""simple docstring"""
__lowerCAmelCase : List[InputFeatures]
__lowerCAmelCase : int =-1_0_0
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase=False , UpperCamelCase = Split.train , ):
lowerCamelCase__ = token_classification_task.read_examples_from_file(UpperCamelCase , UpperCamelCase)
# TODO clean up all this to leverage built-in features of tokenizers
lowerCamelCase__ = token_classification_task.convert_examples_to_features(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , cls_token_at_end=bool(model_type in ["xlnet"]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCamelCase , pad_on_left=bool(tokenizer.padding_side == "left") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCamelCase__ = tf.data.Dataset.from_generator(
UpperCamelCase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
tf.TensorShape([None]),
) , )
else:
lowerCamelCase__ = tf.data.Dataset.from_generator(
UpperCamelCase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([None]),
) , )
def __UpperCAmelCase ( self):
lowerCamelCase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self):
return len(self.features)
def __getitem__( self , UpperCamelCase):
return self.features[i]
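# Standalone sketch of the lock-then-cache pattern used by the dataset classes above
# (helper name is illustrative): the FileLock ensures only the first process builds
# the features while the others wait and then load the cached file.
import os
import torch
from filelock import FileLock

def load_or_compute(cache_path: str, compute_fn, overwrite_cache: bool = False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite_cache:
            return torch.load(cache_path)
        features = compute_fn()
        torch.save(features, cache_path)
        return features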
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : List[Any] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Optional[Any] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__lowerCAmelCase : Any ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__lowerCAmelCase : Optional[int] ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase):
lowerCamelCase__ = ZeroShotClassificationPipeline(
model=UpperCamelCase , tokenizer=UpperCamelCase , candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase):
lowerCamelCase__ = classifier("Who are you voting for in 2020?" , candidate_labels="politics")
self.assertEqual(UpperCamelCase , {"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase)]})
# No kwarg
lowerCamelCase__ = classifier("Who are you voting for in 2020?" , ["politics"])
self.assertEqual(UpperCamelCase , {"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase)]})
lowerCamelCase__ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"])
self.assertEqual(UpperCamelCase , {"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase)]})
lowerCamelCase__ = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health")
self.assertEqual(
UpperCamelCase , {"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase), ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase), ANY(UpperCamelCase)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])) , 1.0)
lowerCamelCase__ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"])
self.assertEqual(
UpperCamelCase , {"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase), ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase), ANY(UpperCamelCase)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])) , 1.0)
lowerCamelCase__ = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}")
self.assertEqual(UpperCamelCase , {"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase)]})
# https://github.com/huggingface/transformers/issues/13846
lowerCamelCase__ = classifier(["I am happy"] , ["positive", "negative"])
self.assertEqual(
UpperCamelCase , [
{"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase), ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase), ANY(UpperCamelCase)]}
for i in range(1)
] , )
lowerCamelCase__ = classifier(["I am happy", "I am sad"] , ["positive", "negative"])
self.assertEqual(
UpperCamelCase , [
{"sequence": ANY(UpperCamelCase), "labels": [ANY(UpperCamelCase), ANY(UpperCamelCase)], "scores": [ANY(UpperCamelCase), ANY(UpperCamelCase)]}
for i in range(2)
] , )
with self.assertRaises(UpperCamelCase):
classifier("" , candidate_labels="politics")
with self.assertRaises(UpperCamelCase):
classifier(UpperCamelCase , candidate_labels="politics")
with self.assertRaises(UpperCamelCase):
classifier("Who are you voting for in 2020?" , candidate_labels="")
with self.assertRaises(UpperCamelCase):
classifier("Who are you voting for in 2020?" , candidate_labels=UpperCamelCase)
with self.assertRaises(UpperCamelCase):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(UpperCamelCase):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=UpperCamelCase , )
self.run_entailment_id(UpperCamelCase)
def __UpperCAmelCase ( self , UpperCamelCase):
lowerCamelCase__ = zero_shot_classifier.model.config
lowerCamelCase__ = config.labelaid
lowerCamelCase__ = zero_shot_classifier.entailment_id
lowerCamelCase__ = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1)
lowerCamelCase__ = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0)
lowerCamelCase__ = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0)
lowerCamelCase__ = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2)
lowerCamelCase__ = original_labelaid
self.assertEqual(UpperCamelCase , zero_shot_classifier.entailment_id)
@require_torch
def __UpperCAmelCase ( self):
lowerCamelCase__ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 1_00 , candidate_labels=["politics", "public health", "science"])
@require_torch
def __UpperCAmelCase ( self):
lowerCamelCase__ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
lowerCamelCase__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(UpperCamelCase) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def __UpperCAmelCase ( self):
lowerCamelCase__ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
lowerCamelCase__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(UpperCamelCase) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def __UpperCAmelCase ( self):
lowerCamelCase__ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt")
lowerCamelCase__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(UpperCamelCase) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
lowerCamelCase__ = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase , )
self.assertEqual(
nested_simplify(UpperCamelCase) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def __UpperCAmelCase ( self):
lowerCamelCase__ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf")
lowerCamelCase__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(UpperCamelCase) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
lowerCamelCase__ = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase , )
self.assertEqual(
nested_simplify(UpperCamelCase) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
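# Illustrative usage of the pipeline exercised above ("roberta-large-mnli" appears in
# the slow tests; the example inputs are assumptions):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])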
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__magic_name__ = 10
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ):
for i in range(__UpperCAmelCase , __UpperCAmelCase ):
if array[i] == target:
return i
return -1
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : int ):
__snake_case : Tuple = 0
__snake_case : Any = len(__UpperCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__snake_case : List[Any] = (left + right) // 3 + 1
__snake_case : Any = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__snake_case : int = one_third - 1
elif array[two_third] < target:
__snake_case : Any = two_third + 1
else:
__snake_case : Dict = one_third + 1
__snake_case : str = two_third - 1
else:
return -1
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ):
if left < right:
if right - left < precision:
return lin_search(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__snake_case : List[str] = (left + right) // 3 + 1
__snake_case : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__UpperCAmelCase , one_third - 1 , __UpperCAmelCase , __UpperCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , __UpperCAmelCase , __UpperCAmelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by comma:\n''').strip()
__magic_name__ = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
__magic_name__ = int(input('''Enter the number to be found in the list:\n''').strip())
__magic_name__ = ite_ternary_search(collection, target)
__magic_name__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
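# Quick illustrative checks (not part of the original module); inputs this short fall
# straight through to lin_search because right - left < precision:
#   ite_ternary_search([1, 3, 5, 7, 9, 11], 7)        # -> 3
#   ite_ternary_search([1, 3, 5, 7, 9, 11], 4)        # -> -1
#   rec_ternary_search(0, 5, [1, 3, 5, 7, 9, 11], 9)  # -> 4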
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__magic_name__ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__magic_name__ = typing.Union[np.floataa, int, float] # noqa: UP007
def UpperCAmelCase__( __UpperCAmelCase : Vector , __UpperCAmelCase : Vector ):
return np.sqrt(np.sum((np.asarray(__UpperCAmelCase ) - np.asarray(__UpperCAmelCase )) ** 2 ) )
def UpperCAmelCase__( __UpperCAmelCase : Vector , __UpperCAmelCase : Vector ):
    return sum((va - vb) ** 2 for va, vb in zip(__UpperCAmelCase , __UpperCAmelCase ) ) ** (1 / 2)
if __name__ == "__main__":
def UpperCAmelCase__( ):
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) )
benchmark()
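# Quick illustrative check (not part of the original module): both implementations
# agree on a 3-4-5 right triangle.
#   euclidean_distance((0, 0), (3, 4))         # -> 5.0
#   euclidean_distance_no_np((0, 0), (3, 4))   # -> 5.0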
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
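# Quick illustrative checks (not part of the original module):
#   bisect_left([1, 2, 3, 3, 5], 3)                         # -> 2 (first index with a value >= 3)
#   bisect_right([1, 2, 3, 3, 5], 3)                        # -> 4 (first index with a value > 3)
#   binary_search([0, 5, 7, 10, 15], 15)                    # -> 4
#   binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4)  # -> 1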
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A (SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = DanceDiffusionPipeline
__lowerCamelCase : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowerCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
__lowerCamelCase : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowerCamelCase : List[str] = False
__lowerCamelCase : Union[str, Any] = False
def a_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCAmelCase , use_timestep_embedding=__lowerCAmelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
A__ = IPNDMScheduler()
A__ = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def a_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any]=0 ) -> List[Any]:
"""simple docstring"""
if str(__lowerCAmelCase ).startswith("""mps""" ):
A__ = torch.manual_seed(__lowerCAmelCase )
else:
A__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
A__ = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def a_ ( self : str ) -> str:
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = DanceDiffusionPipeline(**__lowerCAmelCase )
A__ = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
A__ = self.get_dummy_inputs(__lowerCAmelCase )
A__ = pipe(**__lowerCAmelCase )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A__ = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def a_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def a_ ( self : Tuple ) -> Any:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def a_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A (unittest.TestCase ):
'''simple docstring'''
def a_ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
A__ = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=__lowerCAmelCase , num_inference_steps=1_00 , audio_length_in_s=4.0_9_6 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def a_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
A__ = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=__lowerCAmelCase , num_inference_steps=1_00 , audio_length_in_s=4.0_9_6 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
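# Illustrative usage sketch for the pipeline exercised above (the model id comes from
# the slow tests; everything else is an assumption):
#   import torch
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16).to("cuda")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]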
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
A : List[Any] = parser.parse_args()
    A : int = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
A : Tuple = CLIPImageProcessor()
A : int = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
A : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
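# Usage sketch (the script file name is an assumption):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation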
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Dict=7 ,_a : Dict=3 ,_a : Optional[int]=18 ,_a : List[str]=30 ,_a : Union[str, Any]=400 ,_a : Optional[Any]=True ,_a : List[str]=None ,_a : Any=True ,):
'''simple docstring'''
A_ : Optional[int] = size if size is not None else {"""height""": 18, """width""": 18}
A_ : str = parent
A_ : List[str] = batch_size
A_ : str = num_channels
A_ : Dict = image_size
A_ : Dict = min_resolution
A_ : str = max_resolution
A_ : Dict = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = apply_ocr
def _a ( self : Dict ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
a_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self : int ):
'''simple docstring'''
A_ : Any = LayoutLMvaImageProcessingTester(self )
@property
def _a ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase ,"""do_resize""" ) )
self.assertTrue(hasattr(_lowercase ,"""size""" ) )
self.assertTrue(hasattr(_lowercase ,"""apply_ocr""" ) )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
A_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _a ( self : int ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase ,Image.Image )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
self.assertIsInstance(encoding.words ,_lowercase )
self.assertIsInstance(encoding.boxes ,_lowercase )
# Test batched
A_ : Union[str, Any] = image_processing(_lowercase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase ,numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase ,np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
A_ : Union[str, Any] = image_processing(_lowercase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : Dict ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase ,torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase ,torch.Tensor )
# Test not batched input
A_ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
A_ : Dict = image_processing(_lowercase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ : Dict = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )
A_ : Any = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
A_ : List[str] = image_processing(_lowercase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
A_ : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_lowercase )
self.assertListEqual(encoding.boxes ,_lowercase )
# with apply_OCR = False
A_ : int = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
A_ : int = image_processing(_lowercase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
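# Note (illustrative): with apply_ocr=True the processor runs Tesseract and returns
# `words` and `boxes` alongside `pixel_values`; with apply_ocr=False, as in the last
# check above, only `pixel_values` is returned.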
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
return baseaa.aaaencode(string.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
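# Illustrative round-trip of the Ascii85 helpers above, written against the stdlib
# directly (assumption: `baseaa.aaaencode`/`aaadecode` correspond to
# `base64.a85encode`/`a85decode`):
import base64

assert base64.a85decode(base64.a85encode(b"hello")).decode("utf-8") == "hello"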
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : Optional[Any] = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
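# Hedged aside (not part of the original module): with the lazy structure above,
# importing the package is cheap; the heavy torch/tf/flax submodules are only
# imported when an attribute is first accessed through the _LazyModule proxy.
#
#     import transformers.models.roberta as roberta
#     _ = roberta.RobertaConfig   # first access triggers the real import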
| 302
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig ( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = '''dinat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=6_4 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 1_6] , kernel_size=7 , dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
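# Hedged usage sketch (based only on the constructor defaults above): the config
# derives a few attributes instead of taking them as arguments.
#
#     config = DinatConfig(embed_dim=6_4, depths=[3, 4, 6, 5])
#     assert config.num_layers == 4      # len(depths)
#     assert config.hidden_size == 512   # 64 * 2 ** (len(depths) - 1)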
| 558
| 0
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = "x" , UpperCamelCase__ = 10**-10 , UpperCamelCase__ = 1 , ) -> int:
"""simple docstring"""
A = symbols(UpperCamelCase__ )
A = lambdify(UpperCamelCase__ , UpperCamelCase__ )
A = lambdify(UpperCamelCase__ , diff(UpperCamelCase__ , UpperCamelCase__ ) )
A = starting_point
while True:
if diff_function(UpperCamelCase__ ) != 0:
A = prev_guess - multiplicity * func(UpperCamelCase__ ) / diff_function(
UpperCamelCase__ )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
A = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 702
|
"""simple docstring"""
def one_pence() -> int:
    """simple docstring"""
    return 1
def two_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ) -> int:
    """simple docstring"""
    return two_pound(x )
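# Hedged alternative sketch (not in the original): the same count via the classic
# bottom-up coin-change table, avoiding the deep mutual recursion above.
def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[v] = number of ways to make v pence
    for coin in coins:
        for value in range(coin, pence + 1):
            ways[value] += ways[value - coin]
    return ways[pence]  # solution_dp(200) should equal solution(200)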
if __name__ == "__main__":
print(solution(int(input().strip())))
| 91
| 0
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=6_4 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=6_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config( self ):
        return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mpnet_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mpnet_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mpnet_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_mpnet_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MPNetModel,
            'fill-mask': MPNetForMaskedLM,
            'question-answering': MPNetForQuestionAnswering,
            'text-classification': MPNetForSequenceClassification,
            'token-classification': MPNetForTokenClassification,
            'zero-shot': MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    def setUp( self ):
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mpnet_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs )
@require_torch
class MPNetModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head( self ):
        model = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 661
|
"""simple docstring"""
def solution(n: int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 2_84_33 * (pow(2 , 7_83_04_57 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 661
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i : int ) -> int: # picklable for multiprocessing
    '''simple docstring'''
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_validation( ) -> Any:
    '''simple docstring'''
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested( num_proc ) -> Any:
    '''simple docstring'''
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
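# Hedged usage sketch (names as fixed above; plain multiprocessing needs no spark):
# map_nested applies a picklable function over nested lists/dicts, optionally in parallel.
#
#     from datasets.utils.py_utils import map_nested
#     map_nested(add_one, {'a': [1, 2], 'b': 3}, num_proc=2)  # -> {'a': [2, 3], 'b': 4}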
| 721
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest( unittest.TestCase ):
    @parameterized.expand([(None,), ('foo.json',)] )
    def test_save_load_config( self , config_name ) -> str:
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , None )
    def test_from_model_config( self ) -> Optional[int]:
        model_config = AutoConfig.from_pretrained('gpt2' )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
    def test_update( self ) -> Tuple:
        generation_config = GenerationConfig()
        update_kwargs = {
            'max_new_tokens': 1024,
            'foo': 'bar',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs )
        unused_kwargs = generation_config.update(**update_kwargs )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {'foo': 'bar'} )
    def test_initialize_new_kwargs( self ) -> Optional[int]:
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )
            new_config = GenerationConfig.from_pretrained(tmp_dir )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo , 'bar' )
        generation_config = GenerationConfig.from_model_config(new_config )
        assert not hasattr(generation_config , 'foo' ) # no new kwargs should be initialized if from config
    def test_kwarg_init( self ) -> List[Any]:
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , False )
        self.assertEqual(default_config.num_beams , 1 )
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , True )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class ConfigPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ) -> List[str]:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ) -> Union[str, Any]:
        try:
            delete_repo(token=cls._token , repo_id='test-generation-config' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
        except HTTPError:
            pass
    def test_push_to_hub( self ) -> int:
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('test-generation-config' , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-generation-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='test-generation-config' , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self ) -> List[str]:
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-generation-config-org' , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
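# Hedged usage sketch (plain transformers API, no hub credentials required):
# a GenerationConfig survives a local save/load round trip.
#
#     from transformers import GenerationConfig
#     cfg = GenerationConfig(do_sample=True, temperature=0.7)
#     cfg.save_pretrained("/tmp/gen-cfg")
#     assert GenerationConfig.from_pretrained("/tmp/gen-cfg").temperature == 0.7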
| 541
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 1_50
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """patch_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
    if "out_conv" in name:
        name = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    return name
def read_in_q_k_v( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    '''simple docstring'''
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 4_80 if """ade""" in checkpoint_url else 3_84
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
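# Hedged invocation sketch (the script file name is an assumption; the URL is the
# argparse default above):
#
#     python convert_dpt_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large \
#         --model_name dpt-large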
| 275
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
    freeze_encoder: bool = field(default=False ,metadata={'help': 'Whether to freeze the encoder.'})
    freeze_embeds: bool = field(default=False ,metadata={'help': 'Whether to freeze the embeddings.'})
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    task: Optional[str] = field(
        default='summarization' ,metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} ,)
    max_source_length: Optional[int] = field(
        default=10_24 ,metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    max_target_length: Optional[int] = field(
        default=1_28 ,metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    val_max_target_length: Optional[int] = field(
        default=1_42 ,metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } ,)
    test_max_target_length: Optional[int] = field(
        default=1_42 ,metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    n_train: Optional[int] = field(default=-1 ,metadata={'help': '# training examples. -1 means use all.'})
    n_val: Optional[int] = field(default=-1 ,metadata={'help': '# validation examples. -1 means use all.'})
    n_test: Optional[int] = field(default=-1 ,metadata={'help': '# test examples. -1 means use all.'})
    src_lang: Optional[str] = field(default=None ,metadata={'help': 'Source language id for translation.'})
    tgt_lang: Optional[str] = field(default=None ,metadata={'help': 'Target language id for translation.'})
    eval_beams: Optional[int] = field(default=None ,metadata={'help': '# num_beams to use for evaluation.'})
    ignore_pad_token_for_loss: bool = field(
        default=True ,metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} ,)
def handle_metrics( split , metrics , output_dir ):
    '''simple docstring'''
    logger.info(f'***** {split} metrics *****' )
    for key in sorted(metrics.keys() ):
        logger.info(f'  {key} = {metrics[key]}' )
    save_json(metrics , os.path.join(output_dir , f'{split}_results.json' ) )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("""Training/evaluation parameters %s""" , training_args )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(metric_key_prefix="""val""" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["""val_loss"""] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("""val""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="""test""" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , """all_results.json""" ) )
    return all_metrics
def _mp_fn( index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
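# Hedged invocation sketch (script file name and data layout are assumptions; the
# legacy seq2seq examples expect {train,val,test}.source / .target files in data_dir):
#
#     python finetune_trainer.py \
#         --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#         --data_dir ./cnn_dm --output_dir ./out \
#         --do_train --do_eval --predict_with_generate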
| 275
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob (text: str ) -> None:
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text (text: str ) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    """simple docstring"""
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
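# Hedged usage sketch (only the functions defined above): feed any lowercase
# letters-and-spaces text to calculate_prob to print the single-character entropy
# H(X), the pair entropy H(X,Y), and their difference.
#
#     calculate_prob("the quick brown fox jumps over the lazy dog")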
| 296
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.block_size = 10
    def test_fit_to_block_sequence_too_small( self ) -> str:
        '''simple docstring'''
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_fit_exactly( self ) -> Tuple:
        '''simple docstring'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_too_big( self ) -> Tuple:
        '''simple docstring'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights( self ) -> Tuple:
        '''simple docstring'''
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def test_process_empty_story( self ) -> Union[str, Any]:
        '''simple docstring'''
        raw_story = ''''''
        story_lines , summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
    def test_process_story_with_missing_period( self ) -> Any:
        '''simple docstring'''
        raw_story = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        story_lines , summary_lines = process_story(raw_story )
        expected_story_lines = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['''It was the best of times.''']
        self.assertEqual(expected_summary_lines , summary_lines )
    def test_build_mask_no_padding( self ) -> Tuple:
        '''simple docstring'''
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def test_build_mask( self ) -> Optional[Any]:
        '''simple docstring'''
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
    def test_build_mask_with_padding_equal_to_one( self ) -> List[str]:
        '''simple docstring'''
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def test_compute_token_type_ids( self ) -> Union[str, Any]:
        '''simple docstring'''
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
| 296
| 1
|
import collections
import os
import re
from pathlib import Path
lowercase : List[Any] = """src/transformers"""
# Matches is_xxx_available()
lowercase : List[str] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase : int = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase : Optional[int] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase : Union[str, Any] = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase : str = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase : Union[str, Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase : Union[str, Any] = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase : Union[str, Any] = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase : Union[str, Any] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase : Any = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowercase : Tuple = re.compile(r"""^\s*else:""")
def find_backend( lowercase_ ) -> Any:
    if _re_test_backend.search(lowercase_ ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(lowercase_ )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( lowercase_ ) -> List[Any]:
    with open(lowercase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'\[([^\]]+)\]' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check all submodules of Transformers are properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
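# For illustration only: a minimal, hypothetical __init__.py that would pass the checks
# above. Every object listed under a key of `_import_structure` has a matching import in
# the TYPE_CHECKING block, so `parse_init` returns two identical halves and
# `analyze_results` produces no errors.
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#     else:
#         import sys
#
#         sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)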
| 302
|
'''simple docstring'''
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.
    """
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
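
# A quick illustrative check (not part of the original module): with both angles at zero
# the product of the two phasors is purely real, e.g.
#
#     apparent_power(100, 5, 0, 0)   -> (500+0j)
#     apparent_power(100, 5, 90, 0)  -> approximately 500j (a tiny real residue remains
#                                       from floating-point rounding of cos(pi/2))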
if __name__ == "__main__":
import doctest
doctest.testmod()
| 536
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
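# Illustrative note (assumes the structure above; the exact import path can differ across
# library versions): importing this package does not pull in torch. `_LazyModule` resolves
# attributes on first access, so something like
#
#     from transformers.models.mmbt import MMBTConfig   # cheap, config only
#
# only triggers the real `modeling_mmbt` import when a modeling class is actually accessed.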
| 119
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused qkv projection into separate query/key/value entries.
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
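# Example invocation (illustrative; the script filename is assumed, and timm plus a
# writable output directory are required):
#
#     python convert_swinv2_timm_to_pytorch.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2_tiny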
| 119
| 1
|
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """
    For each prefix of `input_string`, compute the length of the longest proper
    prefix that is also a suffix (the KMP failure function).
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Returns the largest value of the prefix function for `input_string`."""
    return max(prefix_function(input_string))
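
# Worked example (values verified by hand, not part of the original module):
#
#     prefix_function("aabcdaabc")  ->  [0, 1, 0, 0, 0, 1, 2, 3, 4]
#     longest_prefix("aabcdaabc")   ->  4
#
# The final 4 says the 4-character prefix "aabc" reappears as a suffix of the string.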
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
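# Minimal sketch of the decorator pattern used above (illustrative, side-effect free).
# `find_executable_batch_size` retries the wrapped function with a smaller batch size
# whenever it raises a CUDA out-of-memory error, which is why the loop body builds its
# dataloaders from the injected `batch_size` argument:
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#
#     train()  # called with no arguments; the decorator supplies batch_size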
| 653
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__UpperCamelCase : Any = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""DPTFeatureExtractor"""]
__UpperCamelCase : int = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
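
# Illustrative usage (assumes network access to the Hub and a PIL image in `image`):
#
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#     # `encoding` then contains input_ids, bbox, attention_mask and the resized image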
| 106
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running Markov Chain Algorithm
    """

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """
    Run the Markov chain for `steps` transitions and count how often each node is visited.
    """
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
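
# Illustrative run (stochastic, so exact counts vary between executions):
#
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     get_transitions("a", transitions, 1000)
#     # -> e.g. Counter({'a': 845, 'b': 157}); 'a' dominates because of its 0.9 self-loop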
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 13
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize the shorter edge of the image to `size["shortest_edge"]`, keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Center crop the image to `(size["height"], size["width"])`.
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Rescale pixel values by `scale` (typically 1/255).
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Normalize with the CLIP mean/std by default.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
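
# Illustrative usage (any CLIP checkpoint works the same way; assumes a PIL image in `image`):
#
#     from transformers import CLIPImageProcessor
#
#     image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = image_processor(images=image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])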
| 705
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["CLIPFeatureExtractor"]
_lowercase : int = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 96
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : Optional[int] ):
'''simple docstring'''
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    '''simple docstring'''
    def fusea( qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , "_amax" ):
                print(" WARNING: NO AMAX BUFFER" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def expand_amax( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def recalibrate_weights( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=25 , line_width=180 , ignore=None ):
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , "weight" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , "_input_quantizer" , None )
        weight_q = getattr(mod , "_weight_quantizer" , None )
        if not hasattr(mod , "weight" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        s = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'{name:{name_width}} {act_str}' )
            logger.info(f'{" ":{name_width}} {wgt_str}' )
def print_quant_summary( model ):
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'{name:80} {mod}' )
            count += 1
    print(f'{count} TensorQuantizers found in model' )
def set_quantizer( name , mod , quantizer , k , v ):
    '''simple docstring'''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'{name} has no {quantizer}' )
def set_quantizers( name , mod , which="both" , **kwargs ):
    '''simple docstring'''
    s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
        if which in ["input", "both"]:
            set_quantizer(name , mod , "_input_quantizer" , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , "_weight_quantizer" , k , v )
    logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod , k , v )
                    logger.info(s )
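# --- Added illustration (not part of the original module) ---
# A minimal sketch of the per-channel amax computation that recalibrate_weights
# performs above, written with plain PyTorch so it runs without
# pytorch-quantization installed. The axis convention (quantize along dim 0,
# reduce the absolute maximum over every other dim) follows the code above.
import torch as _torch
def _weight_amax(weight , axis=0 ):
    reduce_dims = tuple(d for d in range(weight.dim() ) if d != axis )
    return weight.abs().amax(dim=reduce_dims , keepdim=True )
_w = _torch.randn(4 , 16 )        # e.g. a small Linear weight
print(_weight_amax(_w ).shape )   # torch.Size([4, 1]) -- one scale per output channel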
| 400
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case : Dict = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class snake_case_ (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , src_lang=None , tgt_lang=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
a__ = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
a__ = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_A ,tgt_lang=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
a__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
a__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ = 1
a__ = len(self.sp_model )
a__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
a__ = {v: k for k, v in self.lang_code_to_id.items()}
a__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a__ = src_lang if src_lang is not None else 'en_XX'
a__ = self.lang_code_to_id[self._src_lang]
a__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__( self :List[str] ) -> Any:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__( self :Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> List[Any]:
a__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self :List[str] ) -> int:
a__ = self.__dict__.copy()
a__ = None
return state
def __setstate__( self :List[Any] ,__snake_case :Dict ) -> Optional[int]:
a__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__( self :Any ) -> str:
a__ = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__( self :str ,__snake_case :str ) -> Tuple:
return self.sp_model.encode(_A ,out_type=_A )
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__( self :int ,__snake_case :int ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__( self :List[Any] ,__snake_case :str ) -> Tuple:
a__ = []
a__ = ''
a__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
a__ = True
a__ = []
else:
current_sub_tokens.append(_A )
a__ = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Any:
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def lowerCamelCase__( self :Dict ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ,__snake_case :bool = False ) -> Union[str, Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
a__ = [1] * len(self.prefix_tokens )
a__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def lowerCamelCase__( self :Tuple ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[str]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :Optional[str] ,__snake_case :Optional[str] ,**__snake_case :List[Any] ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a__ = src_lang
a__ = self(_A ,add_special_tokens=_A ,return_tensors=_A ,**_A )
a__ = self.convert_tokens_to_ids(_A )
a__ = tgt_lang_id
return inputs
def lowerCamelCase__( self :int ,__snake_case :List[str] ,__snake_case :str = "en_XX" ,__snake_case :Optional[List[str]] = None ,__snake_case :str = "ro_RO" ,**__snake_case :List[str] ,) -> Optional[int]:
a__ = src_lang
a__ = tgt_lang
return super().prepare_seqaseq_batch(_A ,_A ,**_A )
def lowerCamelCase__( self :str ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__( self :Tuple ,__snake_case :str ) -> str:
a__ = self.lang_code_to_id[src_lang]
a__ = [self.cur_lang_code_id]
a__ = [self.eos_token_id]
def lowerCamelCase__( self :List[str] ,__snake_case :str ) -> Optional[int]:
a__ = self.lang_code_to_id[tgt_lang]
a__ = [self.cur_lang_code_id]
a__ = [self.eos_token_id]
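# --- Added illustration (toy values, not part of the tokenizer class) ---
# The fairseq/spm alignment above boils down to: special tokens get fixed low
# ids, every SentencePiece id is shifted by fairseq_offset, and spm id 0 maps
# back to <unk>. A self-contained version of that lookup:
_fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_fairseq_offset = 1
def _token_to_id(token , spm_id ):
    if token in _fairseq_tokens_to_ids:
        return _fairseq_tokens_to_ids[token]
    # SentencePiece returns 0 for unknown pieces, so fall back to <unk>
    return spm_id + _fairseq_offset if spm_id else _fairseq_tokens_to_ids['<unk>']
print(_token_to_id('<pad>' , 0 ) )  # 1
print(_token_to_id(',' , 3 ) )      # 4 -- spm id 3 shifted by the offset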
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
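# --- Added illustration (a standalone sketch, not the transformers machinery) ---
# The lazy-import pattern above can be approximated with PEP 562's module-level
# __getattr__: a submodule listed in the structure is only imported the first
# time one of its symbols is requested. Module names here are stdlib stand-ins.
import importlib
_demo_structure = {'math': ['sqrt'], 'json': ['dumps']}
def __getattr__(name ):
    for module_name, symbols in _demo_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name ) , name )
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}' )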
| 657
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )
class UpperCamelCase__ ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
def __init__( self, snake_case__ = True, snake_case__ = None, snake_case__ = PILImageResampling.BILINEAR, snake_case__ = True, snake_case__ = None, snake_case__ = True, snake_case__ = 1 / 2_55, snake_case__ = True, snake_case__ = True, snake_case__ = None, snake_case__ = None, **snake_case__, ) -> None:
"""simple docstring"""
super().__init__(**snake_case__ )
lowercase_ : str = size if size is not None else {"""shortest_edge""": 2_56}
lowercase_ : int = get_size_dict(snake_case__, default_to_square=snake_case__ )
lowercase_ : int = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowercase_ : Optional[int] = get_size_dict(snake_case__, param_name="""crop_size""" )
lowercase_ : List[Any] = do_resize
lowercase_ : int = size
lowercase_ : int = do_center_crop
lowercase_ : Optional[Any] = crop_size
lowercase_ : str = resample
lowercase_ : Any = do_rescale
lowercase_ : Dict = rescale_factor
lowercase_ : List[Any] = offset
lowercase_ : int = do_normalize
lowercase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ = PILImageResampling.BILINEAR, snake_case__ = None, **snake_case__, ) -> np.ndarray:
"""simple docstring"""
lowercase_ : int = get_size_dict(snake_case__, default_to_square=snake_case__ )
if "shortest_edge" in size:
lowercase_ : Union[str, Any] = get_resize_output_image_size(snake_case__, size["""shortest_edge"""], default_to_square=snake_case__ )
elif "height" in size and "width" in size:
lowercase_ : Dict = (size["""height"""], size["""width"""])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(snake_case__, size=snake_case__, resample=snake_case__, data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ = None, **snake_case__, ) -> np.ndarray:
"""simple docstring"""
lowercase_ : List[Any] = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(snake_case__, size=(size["""height"""], size["""width"""]), data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ = True, snake_case__ = None, **snake_case__, ) -> str:
"""simple docstring"""
lowercase_ : Dict = image.astype(np.floataa )
if offset:
lowercase_ : Optional[Any] = image - (scale / 2)
return rescale(snake_case__, scale=snake_case__, data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__, snake_case__ = None, **snake_case__, ) -> np.ndarray:
"""simple docstring"""
return normalize(snake_case__, mean=snake_case__, std=snake_case__, data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = ChannelDimension.FIRST, ) -> np.ndarray:
"""simple docstring"""
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
lowercase_ : Optional[int] = to_numpy_array(snake_case__ )
if do_resize:
lowercase_ : Dict = self.resize(image=snake_case__, size=snake_case__, resample=snake_case__ )
if do_center_crop:
lowercase_ : Optional[int] = self.center_crop(snake_case__, size=snake_case__ )
if do_rescale:
lowercase_ : Dict = self.rescale(image=snake_case__, scale=snake_case__, offset=snake_case__ )
if do_normalize:
lowercase_ : Optional[Any] = self.normalize(image=snake_case__, mean=snake_case__, std=snake_case__ )
lowercase_ : Tuple = to_channel_dimension_format(snake_case__, snake_case__ )
return image
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = ChannelDimension.FIRST, **snake_case__, ) -> PIL.Image.Image:
"""simple docstring"""
lowercase_ : Any = do_resize if do_resize is not None else self.do_resize
lowercase_ : str = resample if resample is not None else self.resample
lowercase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[Any] = offset if offset is not None else self.offset
lowercase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Dict = get_size_dict(snake_case__, default_to_square=snake_case__ )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Tuple = get_size_dict(snake_case__, param_name="""crop_size""" )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowercase_ : Optional[Any] = make_batched(snake_case__ )
lowercase_ : Optional[Any] = [
[
self._preprocess_image(
image=snake_case__, do_resize=snake_case__, size=snake_case__, resample=snake_case__, do_center_crop=snake_case__, crop_size=snake_case__, do_rescale=snake_case__, rescale_factor=snake_case__, offset=snake_case__, do_normalize=snake_case__, image_mean=snake_case__, image_std=snake_case__, data_format=snake_case__, )
for img in video
]
for video in videos
]
lowercase_ : List[str] = {"""pixel_values""": videos}
return BatchFeature(data=snake_case__, tensor_type=snake_case__ )
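# --- Added illustration (assumed shapes/values, independent of the class above) ---
# The per-frame pipeline, minus resize/crop, applied to a dummy 8-frame video;
# IMAGENET_STANDARD_MEAN/STD resolve to 0.5 per channel.
_frames = np.random.randint(0 , 256 , (8, 224, 224, 3) , dtype=np.uint8 )
_mean = np.array([0.5, 0.5, 0.5] )
_std = np.array([0.5, 0.5, 0.5] )
_rescaled = _frames.astype(np.float32 ) / 255.0      # do_rescale (offset omitted)
_normalized = (_rescaled - _mean) / _std             # do_normalize
_pixel_values = _normalized.transpose(0 , 3 , 1 , 2 )  # ChannelDimension.FIRST
print(_pixel_values.shape )                          # (8, 3, 224, 224)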
| 458
|
def solution( n: int = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
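    # --- Added check (illustrative): brute-force cross-check of the closed form.
    assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640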
| 458
| 1
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length: int = 8 ) -> str:
    """simple docstring"""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl: str , i: int ) -> str:
    """simple docstring"""
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random( chars_incl: str , quantity: int ) -> str:
    """simple docstring"""
    return "".join(secrets.choice(chars_incl ) for _ in range(quantity ) )
def random_number( chars_incl , quantity ) -> None:
    """simple docstring"""
    pass  # Put your code here...
def random_letters( chars_incl , quantity ) -> None:
    """simple docstring"""
    pass  # Put your code here...
def random_characters( chars_incl , quantity ) -> None:
    """simple docstring"""
    pass  # Put your code here...
def is_strong_password( password: str , min_length: int = 8 ) -> bool:
    """simple docstring"""
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main() -> None:
    """simple docstring"""
    length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(length ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , length ) , )
    print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
main()
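    # --- Added usage example (non-interactive, illustrative) ---
    pw = password_generator(12)
    print(len(pw))                            # 12
    print(is_strong_password('Abc123!?xyz'))  # True: upper, lower, digit, special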
| 700
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build( self , input_shape ):
        '''simple docstring'''
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=True , name='cluster_weight' )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer='zeros' , trainable=True , name='cluster_bias' )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=True , name=F"out_projs_._{i}" , )
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=True , name=F"out_layers_._{i}_._weight" , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer='zeros' , trainable=True , name=F"out_layers_._{i}_._bias" , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=True , name=F"out_projs_._{i}" )
                self.out_projs.append(proj )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=True , name=F"out_layers_._{i}_._weight" , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer='zeros' , trainable=True , name=F"out_layers_._{i}_._bias" , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
    @staticmethod
    def _logit( x , W , b , proj=None ):
        '''simple docstring'''
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe' , y , proj )
        return tf.einsum('ibd,nd->ibn' , y , W ) + b
    @staticmethod
    def _gather_logprob( logprob , target ):
        '''simple docstring'''
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 0
if self.n_clusters == 0:
lowerCAmelCase__ :Union[str, Any] = self._logit(__UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCAmelCase__ :Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__UpperCAmelCase , logits=__UpperCAmelCase )
lowerCAmelCase__ :int = tf.nn.log_softmax(__UpperCAmelCase , axis=-1 )
else:
lowerCAmelCase__ :List[str] = shape_list(__UpperCAmelCase )
lowerCAmelCase__ :int = []
lowerCAmelCase__ :Dict = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ :Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCAmelCase__ :List[str] = (target >= l_idx) & (target < r_idx)
lowerCAmelCase__ :Optional[int] = tf.where(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase ) - l_idx
if self.div_val == 1:
lowerCAmelCase__ :Tuple = self.out_layers[0][0][l_idx:r_idx]
lowerCAmelCase__ :str = self.out_layers[0][1][l_idx:r_idx]
else:
lowerCAmelCase__ :str = self.out_layers[i][0]
lowerCAmelCase__ :Any = self.out_layers[i][1]
if i == 0:
lowerCAmelCase__ :Optional[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCAmelCase__ :Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCAmelCase__ :Dict = self._logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.out_projs[0] )
lowerCAmelCase__ :Optional[Any] = tf.nn.log_softmax(__UpperCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCAmelCase__ :str = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :str = self._gather_logprob(__UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ :str = self._logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.out_projs[i] )
lowerCAmelCase__ :Optional[Any] = tf.nn.log_softmax(__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowerCAmelCase__ :Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__UpperCAmelCase )
if target is not None:
lowerCAmelCase__ :Tuple = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Any = self._gather_logprob(__UpperCAmelCase , __UpperCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__UpperCAmelCase , -cur_logprob , shape_list(__UpperCAmelCase ) )
lowerCAmelCase__ :Tuple = tf.concat(__UpperCAmelCase , axis=-1 )
if target is not None:
if return_mean:
lowerCAmelCase__ :Optional[int] = tf.reduce_mean(__UpperCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__UpperCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(__UpperCAmelCase , name=self.name , aggregation='mean' if return_mean else '' )
return out
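# --- Added illustration (standalone, not part of the layer above) ---
# The _gather_logprob trick in isolation: pick, for each row, the
# log-probability of its target id via gather_nd.
_logprob = tf.math.log([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]] )
_target = tf.constant([0, 1] )
_rows = tf.range(tf.shape(_logprob )[0] , dtype=_target.dtype )
_picked = tf.gather_nd(_logprob , tf.stack([_rows, _target] , axis=1 ) )
print(_picked.numpy() )  # [log(0.7), log(0.8)]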
| 560
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ :List[Any] = logging.get_logger(__name__)
UpperCamelCase__ :Optional[Any] = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class A( PretrainedConfig ):
    """simple docstring"""
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=128112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
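# --- Added usage example (relies on the defaults defined above) ---
config = A(num_experts=8 , expert_capacity=32 )
print(config.model_type , config.num_experts , config.router_dtype )  # nllb-moe 8 float32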
| 355
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler( ode_func , ya , xa , step_size , x_end ) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
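    # --- Added usage example (illustrative): integrate dy/dx = y on [0, 1].
    # The endpoint approaches e ~ 2.71828 as step_size shrinks (~2.7169 at h=0.001).
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(ys[-1])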
| 355
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_altclip'''] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCAmelCase ( PipelineTool ):
    default_checkpoint = """facebook/nllb-200-distilled-600M"""
    description = (
        """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
        """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
        """which should be the desired output language. Both `src_lang` and `tgt_lang` are written in """
        """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
    )
    name = """translator"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["""text""", """text""", """text"""]
    outputs = ["""text"""]
    def encode(self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F"{src_lang} is not a supported language." )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"{tgt_lang} is not a supported language." )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="""pt""" , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward(self , inputs ):
        return self.model.generate(**inputs )
    def decode(self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
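# --- Added illustration of the language-code lookup the tool performs ---
print(LANGUAGE_CODES['''Japanese'''] )     # jpn_Jpan
print(LANGUAGE_CODES['''French'''] )       # fra_Latn
print('''Klingon''' in LANGUAGE_CODES )    # False -> encode() raises ValueError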
| 156
| 1
|
from ....utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class __magic_name__ ( snake_case ):
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 348
|
def ugly_numbers( n: int ) -> int:
    '''simple docstring'''
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_00) = }""")
| 348
| 1
|
"""simple docstring"""
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode( word: str ) -> str:
    encoded = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''')
    return encoded
def decode( coded: str ) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''')
    decoded = ''''''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
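    # --- Added round-trip check (illustrative) ---
    secret = encode('''hello world''')
    print(secret)          # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
    print(decode(secret))  # hello world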
| 101
|
"""simple docstring"""
def molarity_to_normality( nfactor: int , moles: float , volume: float ) -> float:
    return round(float(moles / volume) * nfactor)
def moles_to_pressure( volume: float , moles: float , temperature: float ) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume( pressure: float , moles: float , temperature: float ) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature( pressure: float , moles: float , volume: float ) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
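    # --- Added usage example (illustrative; R = 0.0821 L*atm/(mol*K)) ---
    # Pressure of 2 mol of an ideal gas in a 10 L vessel at 300 K:
    print(moles_to_pressure(10, 2, 300))  # 5 (atm, rounded from 4.926)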
| 101
| 1
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
lowerCAmelCase_ = 3
lowerCAmelCase_ = 4
lowerCAmelCase_ = 5
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
    prev_sample: jnp.ndarray
class SCREAMING_SNAKE_CASE__ :
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["""dtype"""]
    _compatibles = []
    has_compatibles = True
@classmethod
def UpperCAmelCase_ ( cls , A_ = None , A_ = None , A_=False , **A_ , )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
UpperCamelCase , UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , 'create_state' ) and getattr(A_ , 'has_state' , A_ ):
UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase_ ( self , A_ , A_ = False , **A_ )-> Any:
'''simple docstring'''
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls )-> Any:
'''simple docstring'''
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def broadcast_to_shape_from_left( x: jnp.ndarray , shape: Tuple[int] ):
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    def alpha_bar( time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
def UpperCAmelCase_ ( cls , A_ )-> str:
'''simple docstring'''
UpperCamelCase = scheduler.config
if config.trained_betas is not None:
UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase = 1.0 - betas
UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def get_sqrt_alpha_prod( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def A_( A : CommonSchedulerState , A : jnp.ndarray , A : jnp.ndarray , A : jnp.ndarray):
UpperCamelCase , UpperCamelCase = get_sqrt_alpha_prod(A , A , A , A)
UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
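# Minimal usage sketch for the helpers above. The tiny scheduler stand-in is
# hypothetical (real schedulers in this library expose `config`/`dtype` the
# same way); it exists only to show how the pieces compose:
#
#   import jax.numpy as jnp
#
#   class _ToyScheduler:
#       dtype = jnp.float32
#       class config:
#           trained_betas = None
#           beta_schedule = "squaredcos_cap_v2"
#           num_train_timesteps = 10
#
#   state = CommonSchedulerState.create(_ToyScheduler())
#   sample = jnp.ones((1, 4))
#   noise = jnp.zeros_like(sample)
#   noisy = add_noise_common(state, sample, noise, jnp.array([5]))  # shape (1, 4)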
| 3
|
"""simple docstring"""
from __future__ import annotations
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : str= data
lowercase__ : Node | None= None
lowercase__ : Node | None= None
def lowercase__(A ) ->None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase__(A ) ->int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase__(A ) ->bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase__() ->None: # Main function for testing.
"""simple docstring"""
lowercase__ : int= Node(1 )
lowercase__ : Union[str, Any]= Node(2 )
lowercase__ : Optional[int]= Node(3 )
lowercase__ : Optional[Any]= Node(4 )
lowercase__ : Optional[Any]= Node(5 )
lowercase__ : Tuple= Node(6 )
lowercase__ : Any= Node(7 )
lowercase__ : Tuple= Node(8 )
lowercase__ : List[Any]= Node(9 )
print(is_full_binary_tree(A ) )
print(depth_of_tree(A ) )
print("Tree is: " )
display(A )
if __name__ == "__main__":
main()
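# For the tree built in main() above, the run prints: True (every node has 0 or
# 2 children), 4 (depth along 1 -> 2 -> 4 -> 8), then the in-order sequence
# 8 4 9 2 5 1 6 3 7.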
| 218
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vivit-b-16x2-kinetics400': (
        'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    '''Configuration class for the ViViT video transformer.'''

    model_type = 'vivit'

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
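# Usage sketch (values mirror the defaults above):
#
#   config = VivitConfig(num_frames=32, tubelet_size=[2, 16, 16])
#   assert config.hidden_size == 768 and config.model_type == "vivit"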
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
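# With the lazy module installed, top-level imports stay cheap and the heavy
# torch-backed symbols resolve on first attribute access, e.g. (sketch):
#
#   from transformers import FalconConfig   # only the config module is loaded
#   config = FalconConfig()                 # modeling code is still untouched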
| 687
| 0
|
"""simple docstring"""
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_lowerCAmelCase = """facebook/wmt19-en-de"""
_lowerCAmelCase = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_lowerCAmelCase = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_lowerCAmelCase = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
_lowerCAmelCase = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
_lowerCAmelCase = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
_lowerCAmelCase = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
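# Quick check of the saved artifact afterwards (sketch):
#   tiny = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
#   out = tiny.generate(**tokenizer(["hello"], return_tensors="pt"))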
| 259
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
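# To run just this suite (sketch; the path assumes the usual diffusers layout):
#   python -m pytest tests/schedulers/test_scheduler_ddpm.py -k "variance or full_loop"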
| 361
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
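# The pipeline under test is driven like this (sketch, reusing a checkpoint
# already referenced above):
#   classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   preds = classifier(np.zeros((16000,)), top_k=2)  # list of {"score", "label"} dicts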
| 713
|
"""simple docstring"""
class snake_case_ :
"""simple docstring"""
def __init__( self , __a , __a ):
"""simple docstring"""
A__ = name
A__ = val
def __str__( self ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self , __a ):
"""simple docstring"""
return self.val < other.val
class snake_case_ :
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
A__ = {}
A__ = {}
A__ = self.build_heap(__a )
def __getitem__( self , __a ):
"""simple docstring"""
return self.get_value(__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
return (idx - 1) // 2
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
return idx * 2 + 1
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
return idx * 2 + 2
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
return self.heap_dict[key]
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = len(__a ) - 1
A__ = self.get_parent_idx(__a )
for idx, i in enumerate(__a ):
A__ = idx
A__ = i.val
for i in range(__a , -1 , -1 ):
self.sift_down(__a , __a )
return array
def _UpperCAmelCase ( self , __a , __a ):
"""simple docstring"""
while True:
A__ = self.get_left_child_idx(__a ) # noqa: E741
A__ = self.get_right_child_idx(__a )
A__ = idx
if l < len(__a ) and array[l] < array[idx]:
A__ = l
if r < len(__a ) and array[r] < array[smallest]:
A__ = r
if smallest != idx:
A__ , A__ = array[smallest], array[idx]
(
(
A__
) , (
A__
) ,
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
A__ = smallest
else:
break
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = self.get_parent_idx(__a )
while p >= 0 and self.heap[p] > self.heap[idx]:
A__ , A__ = self.heap[idx], self.heap[p]
A__ , A__ = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A__ = p
A__ = self.get_parent_idx(__a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self.heap[0]
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ , A__ = self.heap[-1], self.heap[0]
A__ , A__ = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A__ = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
self.heap.append(__a )
A__ = len(self.heap ) - 1
A__ = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ):
"""simple docstring"""
return len(self.heap ) == 0
def _UpperCAmelCase ( self , __a , __a ):
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
A__ = new_value
A__ = new_value
self.sift_up(self.idx_of_element[node] )
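# Complexity: build_heap is O(n); peek is O(1); insert, remove and
# decrease_key are O(log n) thanks to the idx_of_element position table.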
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 554
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 293
|
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add a new key to the lexicon, widening existing codes when needed."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given string of bits using a Lempel-Ziv style lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (self-delimiting encoding) to the data."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given string of bits to a file, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
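# Example (sketch): run from the shell after saving this module, e.g. as
# lempel_ziv.py (the filename is an assumption):
#   $ python lempel_ziv.py sample.txt sample.lz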
| 73
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the original DINO weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
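# Example invocation (sketch; the script filename is an assumption):
#   python convert_dino_vit_checkpoint.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16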
| 192
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
def _lowercase (self : Union[str, Any]) -> Dict:
return TaConfig.from_pretrained('google/umt5-base')
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
def _lowercase (self : Optional[Any]) -> Union[str, Any]:
__snake_case : int = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
__snake_case : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1)
__snake_case : Any = decoder_input_ids.clamp(self.pad_token_id + 1)
__snake_case : Any = self.get_config()
__snake_case : Dict = config.num_attention_heads
__snake_case : List[str] = self.prepare_inputs_dict(_A , _A , _A)
return config, input_dict
def _lowercase (self : List[Any]) -> List[str]:
__snake_case , __snake_case : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase (self : int) -> Any:
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowercase (self : Union[str, Any]) -> str:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowercase (self : Dict , _A : int , _A : Any , _A : str , _A : str , _A : str , _A : Dict , ) -> List[str]:
__snake_case : Dict = UMTaModel(config=_A)
model.to(_A)
model.eval()
__snake_case : str = model(
input_ids=_A , decoder_input_ids=_A , attention_mask=_A , decoder_attention_mask=_A , )
__snake_case : List[str] = model(input_ids=_A , decoder_input_ids=_A)
__snake_case : Dict = result.last_hidden_state
__snake_case : List[Any] = result.past_key_values
__snake_case : Union[str, Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_A) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def _lowercase (self : Union[str, Any] , _A : List[str] , _A : Any , _A : Optional[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : str , ) -> Union[str, Any]:
__snake_case : Dict = UMTaModel(config=_A).get_decoder().to(_A).eval()
# first forward pass
__snake_case : Optional[int] = model(_A , use_cache=_A)
__snake_case : str = model(_A)
__snake_case : str = model(_A , use_cache=_A)
self.parent.assertTrue(len(_A) == len(_A))
self.parent.assertTrue(len(_A) == len(_A) + 1)
__snake_case , __snake_case : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__snake_case : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# append to next input_ids and
__snake_case : str = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case : Any = model(_A)['last_hidden_state']
__snake_case : List[str] = model(_A , past_key_values=_A)['last_hidden_state']
# select random slice
__snake_case : Dict = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
__snake_case : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3))
def _lowercase (self : List[str] , _A : int , _A : List[str] , ) -> Any:
__snake_case : Optional[Any] = UMTaModel(config=_A).to(_A).half().eval()
__snake_case : str = model(**_A)['last_hidden_state']
self.parent.assertFalse(torch.isnan(_A).any().item())
@require_torch
class UpperCamelCase ( lowercase , lowercase , lowercase , unittest.TestCase ):
UpperCAmelCase : List[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase : int = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase : str = True
UpperCAmelCase : int = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : int = True
UpperCAmelCase : Optional[int] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase : Any = [0.8, 0.9]
def _lowercase (self : Optional[int]) -> Union[str, Any]:
__snake_case : Dict = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0')
def _lowercase (self : int) -> int:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
__snake_case : List[Any] = UMTaModel(config_and_inputs[0]).to(_A)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=_A , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision')
def _lowercase (self : Dict) -> Dict:
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_A)
def _lowercase (self : Optional[Any]) -> List[Any]:
__snake_case : Dict = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__snake_case : int = self.model_tester.prepare_config_and_inputs()
__snake_case : str = config_and_inputs[0]
__snake_case : Any = UMTaForConditionalGeneration(_A).eval()
model.to(_A)
__snake_case : List[str] = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_A),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_A),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_A),
}
for attn_name, (name, mask) in zip(_A , head_masking.items()):
__snake_case : Optional[Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__snake_case : List[str] = torch.ones(
config.num_decoder_layers , config.num_heads , device=_A)
__snake_case : Dict = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=_A , return_dict_in_generate=_A , **_A , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__snake_case : Tuple = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
def _lowercase (self : Dict) -> Tuple:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
def _lowercase (self : Dict) -> Optional[Any]:
__snake_case : List[str] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=_A).to(_A)
__snake_case : Tuple = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=_A , legacy=_A)
__snake_case : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__snake_case : int = tokenizer(_A , return_tensors='pt' , padding=_A).input_ids
# fmt: off
__snake_case : Tuple = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
])
# fmt: on
torch.testing.assert_allclose(_A , _A)
__snake_case : int = model.generate(input_ids.to(_A))
__snake_case : Any = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__snake_case : List[Any] = tokenizer.batch_decode(_A)
self.assertEqual(_A , _A)
| 192
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class A_ ( PretrainedConfig ):

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 2_56,
        mask_feature_size: int = 2_56,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_84,
                in_channels=3,
                patch_size=4,
                embed_dim=1_28,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
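
# Usage sketch (an illustration added here, not part of the original module): assuming
# the restored class above mirrors transformers' MaskFormerConfig, a config can be
# composed from explicit backbone/decoder configs and round-tripped through to_dict():
#
#     config = A_.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )
#     assert config.to_dict()["model_type"] == "maskformer"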
| 197
|
from typing import Any
def __A ( input_list ):
    """simple docstring"""
    if not input_list:
        return []
    __a = [input_list.count(value) for value in input_list]
    __b = max(__a)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(__a) if value == __b})
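
# Worked example (added illustration; these are the doctests that testmod() below would
# run if they lived in the docstring): every value tied for the highest count is a mode,
# returned in sorted order.
#
#     >>> __A([2, 3, 4, 5, 3, 4, 2])
#     [2, 3, 4]
#     >>> __A([])
#     []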
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 197
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class lowerCAmelCase__ ( PretrainedConfig ):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below is kept exactly as found in the original config
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """simple docstring"""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 418
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__lowerCamelCase : Any = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class lowerCAmelCase__ ( unittest.TestCase ):
    def setUp(self):
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """simple docstring"""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        """simple docstring"""
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
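
# For reference (an added note, not part of the original test file): the marker the
# checker looks for is a comment of the form
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#
# placed directly above the copied class. check_copies.is_copy_consistent(filename)
# returns the inconsistencies found in that file; with overwrite=True it rewrites the
# copy to match its source, which is what the overwrite case above exercises.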
| 418
| 1
|